blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
281
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
57
| license_type
stringclasses 2
values | repo_name
stringlengths 6
116
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 313
values | visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 18.2k
668M
⌀ | star_events_count
int64 0
102k
| fork_events_count
int64 0
38.2k
| gha_license_id
stringclasses 17
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 107
values | src_encoding
stringclasses 20
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.02M
| extension
stringclasses 78
values | content
stringlengths 2
6.02M
| authors
listlengths 1
1
| author
stringlengths 0
175
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0c5a8e18c89224eccf505020eafb6f868ac103a1
|
641b49ff4a4839aa70083ef169e44302574f8651
|
/app/snippets/models.py
|
bd24e78274688f04dad1b52b28803be35b8f4099
|
[] |
no_license
|
bear-engineer/rest_framework_Django_tutorial
|
ca06c7ce13b5d9028e52c603c92f5ae345300dfb
|
17d87b6930f1fbcab7d04d59c2453db34680a1e3
|
refs/heads/master
| 2021-09-19T06:42:16.102751
| 2018-07-24T08:04:35
| 2018-07-24T08:04:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,834
|
py
|
from django.conf import settings
from django.db import models
from pygments import highlight
from pygments.formatters.html import HtmlFormatter
from pygments.lexers import get_all_lexers, get_lexer_by_name
from pygments.styles import get_all_styles
# Pygments lexers that declare at least one alias (item[1] is the alias tuple);
# lexers without aliases cannot be looked up by name, so they are skipped.
LEXERS = [item for item in get_all_lexers() if item[1]]
# (alias, display-name) choice pairs for the Snippet.language field.
LANGUAGE_CHOICES = sorted([(item[1][0], item[0]) for item in LEXERS])
# (style, style) choice pairs for the Snippet.style field.
STYLE_CHOICES = sorted((item, item) for item in get_all_styles())
class Snippet(models.Model):
    """A code snippet that stores both the raw source and a pygments
    highlighted-HTML rendering of it (regenerated on every save)."""
    created = models.DateTimeField(auto_now_add=True)
    title = models.CharField(max_length=100, blank=True, default='')
    code = models.TextField()
    # Whether to render line numbers in the highlighted output.
    linenos = models.BooleanField(default=False)
    language = models.CharField(choices=LANGUAGE_CHOICES, default='python', max_length=100)
    style = models.CharField(choices=STYLE_CHOICES, default='friendly', max_length=100)
    owner = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='snippets')
    # HTML produced by pygments in save().
    highlighted = models.TextField()

    class Meta:
        ordering = ('created',)

    def save(self, *args, **kwargs):
        """Render ``code`` to highlighted HTML, then persist the snippet.

        (Comments translated from the original Korean.)
        """
        ## This stores the rendered HTML.
        # Get the lexer for the selected language.
        lexer = get_lexer_by_name(self.language)
        # Whether to show line numbers ('table' layout) or not at all.
        linenos = 'table' if self.linenos else False
        # If self.title is set, pass a dict containing a 'title' key as options.
        options = {'title': self.title} if self.title else {}
        # Build the formatter from the values above.
        formatter = HtmlFormatter(
            style=self.style,
            linenos=linenos,
            full=True,
            **options,
        )
        self.highlighted = highlight(self.code, lexer, formatter)
        super().save(*args, **kwargs)
|
[
"d.sehyeon@gmail.com"
] |
d.sehyeon@gmail.com
|
c9d59463ba63d661defdc082ac220006f7beb760
|
2cea7251aac4b05f8169d7b5d26cadbf518136cb
|
/models/Card.py
|
ed2b7e946d6bff97782ef8698b699e92caaa5f57
|
[] |
no_license
|
VanDenHendeSimon/blackjack
|
85e3221ab0fa036b7f78a5cd141e146771f754ed
|
3834edf87ccb7f1ec8672c2943c10e0279387741
|
refs/heads/master
| 2021-05-25T15:14:56.032834
| 2020-04-22T19:48:50
| 2020-04-22T19:48:50
| 253,804,210
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 988
|
py
|
class Card:
    """A single playing card identified by a rank ('character') and a suit."""

    # Rank values that have a named alias when displayed.
    character_aliasses = {
        1: 'Ace',
        11: 'Jack',
        12: 'Queen',
        13: 'King',
    }

    def __init__(self, character, suit):
        self.character = character
        self.suit = suit

    @property
    def suit(self):
        """Suit of the card (stored as given, e.g. 'Hearts')."""
        return self._suit

    @suit.setter
    def suit(self, value):
        self._suit = value

    @property
    def character(self):
        # 10s and jacks/queens/kings are treated the same overall (also for
        # splitting), so the exposed rank is capped at 10.
        return self._character if self._character < 10 else 10

    @character.setter
    def character(self, value):
        self._character = value

    @property
    def character_alias(self):
        # Fall back to the (capped) numeric rank when no alias is defined.
        return Card.character_aliasses.get(self.character, self.character)

    def __str__(self):
        return '%s of %s' % (self.character_alias, self.suit)

    def __repr__(self):
        return self.__str__()
|
[
"simonvdhende@outlook.com"
] |
simonvdhende@outlook.com
|
74ca68420b60222f058228f98a1a446f42d5311d
|
0e3f14722cd87767d29f794530dc1eabc4678a14
|
/projects/migrations/0001_initial.py
|
bf6df9a575080f5727e6d0d3115ebfc864eafca8
|
[] |
no_license
|
Mostaquim/mycampaign
|
e807386b5bc034c0bf8689f29da07bae752ef971
|
4343ff08cb7d86de3efcc3e81b49ca93d01e7ae9
|
refs/heads/master
| 2020-05-09T23:51:06.345794
| 2019-05-09T10:24:22
| 2019-05-09T10:24:22
| 181,513,963
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,738
|
py
|
# Generated by Django 2.1 on 2019-05-06 18:40
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial schema for the projects app (Django 2.1).

    Creates the Invoice, InvoiceItems, PrintingOrder, Project and
    ProjectActivity tables.  Avoid hand-editing field definitions: generated
    migrations must stay consistent with the migration history.
    """
    initial = True
    dependencies = [
        ('accounts', '0001_initial'),
        # The user model is swappable, so depend on whatever AUTH_USER_MODEL names.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('core', '0001_initial'),
    ]
    operations = [
        # Invoice: money fields plus a status workflow.
        migrations.CreateModel(
            name='Invoice',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('currency', models.IntegerField(choices=[(1, '£')])),
                ('sent_date', models.DateField(auto_now_add=True)),
                ('issue_date', models.DateField()),
                ('due_date', models.DateField()),
                ('paid_date', models.DateField(null=True)),
                ('terms', models.TextField()),
                ('discount', models.DecimalField(decimal_places=2, max_digits=11)),
                ('tax', models.DecimalField(decimal_places=2, max_digits=11)),
                ('total', models.DecimalField(decimal_places=2, max_digits=11)),
                ('status', models.IntegerField(choices=[(1, 'Sent'), (2, 'Open'), (3, 'Paid'), (4, 'Partially paid'), (5, 'Cancelled')])),
                ('second_tax', models.DecimalField(decimal_places=2, max_digits=11)),
            ],
        ),
        # Invoice line items (no foreign key to Invoice defined in this migration).
        migrations.CreateModel(
            name='InvoiceItems',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('amount', models.DecimalField(decimal_places=2, max_digits=11)),
                ('description', models.TextField()),
                ('value', models.DecimalField(decimal_places=2, max_digits=11)),
                ('name', models.CharField(max_length=255, null=True)),
                ('item_type', models.CharField(max_length=255, null=True)),
            ],
        ),
        # Print-shop order options (all enumerated as integer choices).
        migrations.CreateModel(
            name='PrintingOrder',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('pages', models.IntegerField(choices=[(1, 'Single Sided'), (2, 'Double Sided'), (3, '2 Pages'), (4, '4 Pages'), (5, '6 Pages'), (6, '8 Pages'), (7, '10 Pages'), (8, '12 Pages')])),
                ('page_orientation', models.IntegerField(choices=[(1, 'Portrait'), (2, 'Landscape')])),
                ('colours', models.IntegerField(choices=[(1, '1/0-coloured Black'), (2, '2/0-coloured Black + Pantone'), (3, '2/0-coloured Black + Gold'), (4, '4/0-coloured CMYK')])),
                ('processing', models.IntegerField(choices=[(1, 'Trimming'), (2, 'Trimming Corner Rounded')])),
                ('priority', models.IntegerField(choices=[(1, 'Low'), (2, 'Normal'), (3, 'High'), (4, 'Urgent')], default=1)),
                ('created', models.DateTimeField(auto_now_add=True, null=True)),
            ],
        ),
        # Distribution campaign with scheduling, cost, attachments and ownership.
        migrations.CreateModel(
            name='Project',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('type_of_service', models.IntegerField(choices=[(1, 'Business To Business'), (2, 'Hand To Hand'), (3, 'Direct Mail'), (4, 'Residential Homes'), (5, 'Shared Distribution'), (6, 'Consultation Distribution')], default=1)),
                ('number_of_boxes', models.IntegerField(choices=[(1, '1'), (2, '2'), (3, '3'), (4, '4 or more'), (5, 'N/A')], default=1)),
                ('type_of_media', models.IntegerField(choices=[(1, 'Flyer'), (2, 'Leaflet'), (3, 'Folded Leaflet'), (4, 'Other')], default=1)),
                ('require_collection', models.IntegerField(choices=[(1, 'Yes'), (2, 'No')], default=1)),
                ('quantity_of_flyers', models.IntegerField(null=True)),
                ('title_of_media', models.CharField(max_length=255, null=True)),
                ('campaign_details', models.TextField(max_length=255)),
                ('agreed_cost', models.DecimalField(decimal_places=2, max_digits=11)),
                ('campaign_start_date', models.DateField()),
                ('campaign_finish_date', models.DateField()),
                ('special_instruction', models.TextField()),
                ('progress', models.IntegerField(default=1)),
                ('created', models.DateTimeField(auto_now_add=True, null=True)),
                ('attachments', models.ManyToManyField(to='core.Attachments')),
                ('company', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='company', to='accounts.Company')),
                ('project_admin', models.ForeignKey(limit_choices_to={'staff': True}, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='project_admin', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Free-form activity log entries attached to a project and (optionally) a user.
        migrations.CreateModel(
            name='ProjectActivity',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True, null=True)),
                ('subject', models.CharField(max_length=255)),
                ('message', models.TextField()),
                ('acitivity_type', models.CharField(max_length=255)),
                ('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='projects.Project')),
                ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
[
"princemostaquim@gmail.com"
] |
princemostaquim@gmail.com
|
a1779179c2054f6e836a42d2ce8325c8f5b7106b
|
234a7517b819d7291dfbcda8d8cbc46a53509dd8
|
/sgr/threads/models.py
|
c079cb09c372a1e8188fcfa3a93fd1c46490dfd9
|
[] |
no_license
|
hkaranjule77/Student-Grievance-Redressal
|
7608ea488e47d22691adfa12daa5e2c980cec223
|
a8ea6cd773f53cbd7adee468749a67639493da0c
|
refs/heads/master
| 2022-11-20T12:24:15.542252
| 2020-07-27T18:38:11
| 2020-07-27T18:38:11
| 268,457,636
| 1
| 2
| null | 2020-07-16T21:54:11
| 2020-06-01T07:47:31
|
Python
|
UTF-8
|
Python
| false
| false
| 15,780
|
py
|
from django.contrib import messages
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.db.models import Q
from django.db.utils import IntegrityError
from django.utils import timezone
from datetime import date
import os
from sgr.settings import BASE_DIR
from user.models import Member
# GLOBAL CONSTANTS
FILE_PATH= os.path.join( BASE_DIR, 'data_files' )
class Category( models.Model ) :
    ''' Lookup table of top-level grievance categories, loadable from a text file. '''
    code = models.CharField( primary_key = True, max_length = 1 )
    name = models.CharField( max_length = 25 )
    #Constants
    global FILE_PATH
    # File of 'code,name;' records the categories are loaded from.
    CATEGORY_PATH = os.path.join( FILE_PATH, 'categories.txt' )

    def get_category( request, category ) :
        ''' Returns Category Object of specified category string, or None
        (after queueing an error message) when it does not exist. '''
        try :
            category_obj = Category.objects.get( name = category )
        except ObjectDoesNotExist :
            messages.error( request, f' No such Category exists. ' )
            return None
        else :
            return category_obj

    def get_code_name_list() :
        ''' Returns a List of [ code, name ] of all Category objects. '''
        return [ [ obj.code, obj.name ] for obj in Category.objects.all() ]

    def get_list():
        ''' Returns a List of all Category names. '''
        return [ category.name for category in Category.objects.all() ]

    def load_data() :
        ''' Loads Categories from file in Database. '''
        # loading data from file & preprocessing the data
        print( "Loading Categories from file into Database..." )
        # Fix: read via a context manager so the file handle is always closed
        # (the original opened the file and never closed it).
        with open( Category.CATEGORY_PATH, 'r' ) as category_file :
            category_data = category_file.read()
        list_of_code_name = category_data.split( ';\n' ) # divides data in category data
        list_of_code_name = list_of_code_name[ : len( list_of_code_name ) - 1 ] # removing last empty line
        for index in range( len( list_of_code_name ) ) :
            list_of_code_name[ index ] = list_of_code_name[ index ].split( ',' ) # separates code and name
        # adding category into database
        categories_added = 0 # counts category updated in database
        for code_name in list_of_code_name :
            # checks if category already exist in DB
            try :
                Category.objects.get( name = code_name[ 1 ] )
            except ObjectDoesNotExist : # occurs when fetched object is not in DB
                # adds category in DB
                category_obj = Category( name = code_name[ 1 ], code = code_name[0] ) # initialization
                try :
                    category_obj.save() # saves Category object
                    categories_added += 1 # updating category update count
                    print( f"Category '{ category_obj.name }' is added with code {category_obj.code }. ")
                except IntegrityError :
                    print( f" Code '{ code_name[ 0 ] }' already exist with another Category ' skipped Category '{ code_name[ 1 ] }' for now change. " )
        if categories_added != 0 :
            print( f' Added { categories_added } new Categories in Database Successfully. ' )
        else :
            print( ' Categories are already up-to date. ' )
class SubCategory( models.Model ) :
    ''' Model Class for storing sub-categories of the Complain and Thread Models. '''
    code = models.CharField( max_length = 1 )
    name = models.CharField( max_length = 25 )
    category = models.ForeignKey( Category, on_delete = models.CASCADE )
    # constant
    global FILE_PATH
    # NOTE(review): 'subcatgories.txt' looks misspelled — confirm it matches
    # the actual file on disk before renaming either one.
    SUBCATEGORY_PATH = os.path.join( FILE_PATH, 'subcatgories.txt')

    def get_code_name_list() :
        ''' Returns a List of [ code, name ] of SubCategory objects nested per Category. '''
        final_list = list()
        for cat_obj in Category.objects.all() :
            sub_cat_qs = SubCategory.objects.filter( category = cat_obj )
            final_list.append( [ [ sub.code, sub.name ] for sub in sub_cat_qs ] )
        return final_list

    def get_list( request, category ):
        ''' Returns a List of Subcategory names for the passed category string.

        NOTE(review): implicitly returns None for an unknown category —
        callers must handle that.
        '''
        category_obj = Category.get_category( request, category )
        if category_obj is not None :
            print( category_obj.name )
            sub_cat_qs = SubCategory.objects.filter( category = category_obj )
            sub_cat_list = [ sub.name for sub in sub_cat_qs ]
            print( sub_cat_list )
            return sub_cat_list

    def load_data() :
        ''' Loads Subcategories from file into DB. '''
        # Fix: read via a context manager so the file handle is always closed
        # (the original opened the file and never closed it).
        with open( SubCategory.SUBCATEGORY_PATH, 'r' ) as subcategory_file :
            subcategory_data = subcategory_file.read()
        subcategory_data = subcategory_data.split( ';\n') # divides subcategory, category-wise
        subcategory_data = subcategory_data[ : len( subcategory_data ) - 1 ] # deleting last blank line
        for category_wise in subcategory_data :
            category_wise = category_wise.split( ';' )
            category_wise[0] = category_wise[0].split( ',' )
            subcategories = category_wise[ 1 : ]
            try :
                category_obj = Category.objects.get( code = category_wise[0][0], name = category_wise[0][1] )
            except ObjectDoesNotExist :
                print( f"Error : No category exist by name '{ category_wise[0] }' so can't update sub-categories { subcategories }. " )
            else :
                for subcategory in subcategories :
                    subcategory = subcategory.split( ',' )
                    # checks if SubCategory already exists in DB
                    try :
                        subcategory_obj = SubCategory.objects.get( name = subcategory[ 1 ], category = category_obj )
                        print( f" SubCategory '{ subcategory_obj.name }' already exist in DB. " )
                        continue
                    except ObjectDoesNotExist :
                        # checks if code is not already taken by another SubCategory of this category
                        try :
                            subcategory_obj = SubCategory.objects.get( code = subcategory[0], category = category_obj )
                            print( f" Another SubCategory '{ subcategory_obj.name }' exist with code '{ subcategory_obj.code }'. " )
                            continue
                        except ObjectDoesNotExist :
                            # saves subcategory if not present
                            subcategory_obj = SubCategory( code = subcategory[0] )
                            subcategory_obj.name = subcategory[1]
                            subcategory_obj.category = category_obj
                            subcategory_obj.save()
                            print( f" Added new SubCategory with code '{ subcategory_obj.code }' and name { subcategory_obj.name } " )
class Redressal( models.Model ) :
    ''' Redressal Model for Complain / Thread Model with actions for HOD / Principal '''
    # The redressal itself: free text and/or an uploaded file, plus who added it and when.
    text = models.TextField( null = True, blank = True )
    file = models.FileField( upload_to = 'thread-redressal/', null = True, blank = True )
    added_by = models.ForeignKey(
        Member,
        related_name = '+',  # '+' disables the reverse relation
        on_delete = models.CASCADE,
        null = True,
        blank = True
    )
    added_at = models.DateTimeField( null = True, blank = True )
    # action of accept / reject of redressal by HOD / Principal.
    action = models.CharField( default = '', max_length = 15 ) # actions - APPROVE / REJECT
    action_msg = models.TextField( null = True )
    action_by = models.ForeignKey(
        Member,
        # NOTE(review): related_name contains a space ('member_on_ thread+');
        # changing it affects the DB/ORM surface, so it is left untouched here.
        related_name = 'member_on_ thread+',
        on_delete = models.CASCADE,
        null = True,
        blank = True
    )
    action_at = models.DateTimeField( null = True, blank = True )

    def approve( self, member ):
        ''' Approves the redressal and saves changes for approval in Thread object. '''
        self.action = 'APPROVE'
        self.action_by = member
        self.action_at = timezone.now()
        self.action_msg = ''  # approval clears any previous rejection message
        self.save( update_fields = [
            'action',
            'action_at',
            'action_by',
            'action_msg',
            ]
        )

    def init_for_reject( self, request, member ):
        ''' Initialize the thread object with rejection data received by post method.

        Only mutates the instance; the caller is expected to validate with
        is_reject_valid() and then persist with reject().
        '''
        self.action = 'REJECT'
        self.action_msg = request.POST.get( 'rejection_msg')
        self.action_at = timezone.now()
        self.action_by = member

    def is_reject_valid( self ):
        ''' Checks if rejection message if not blank. '''
        if self.action_msg == '' or self.action_msg == None:
            return False
        return True

    def reject( self ):
        ''' Rejects redressal and saves the changes accordingly in Thread model.

        Assumes init_for_reject() has already populated the action fields.
        '''
        self.save( update_fields = [
            'action',
            'action_msg',
            'action_by',
            'action_at',
            ]
        )
class Thread( models.Model ) :
    ''' A grievance thread raised by a Member, optionally redressed by staff. '''
    # required data
    id = models.CharField( primary_key = True, max_length = 15 )
    title = models.CharField(max_length = 25)
    category = models.CharField( max_length = 25 )
    sub_category = models.CharField( max_length = 25 )
    description = models.TextField()
    complain_count = models.IntegerField( default = 0 )
    note_count = models.IntegerField( default = 0 )
    created_by = models.ForeignKey( Member, on_delete = models.CASCADE )
    created_at = models.DateTimeField( default = timezone.now )
    # for solving
    solver = models.ForeignKey(
        Member,
        related_name = 'solver_member+',
        on_delete = models.SET_NULL,
        null = True,
        blank = True
    )
    solving_date = models.DateField( null = True, blank = True )
    # redressal
    redressal = models.OneToOneField( Redressal, on_delete = models.CASCADE, null = True, blank = True )
    # constants
    SEARCH_TYPES = ( 'All', 'Title', 'Description', 'Created by', )
    FILTER_OPTIONS = ( 'All', 'Approved', 'Redressed', 'Rejected', 'Unredressed' )

    def __str__( self ) :
        ''' return a string of Thread id when object is called for printing purpose. '''
        return str( self.id )

    def generate_id(self, category, sub_category):
        ''' Generates and initialize id for object when called.

        The id is built from the date (YYMMDD), a category + sub-category
        code and a per-day counter kept in count_files/thread_id.txt.
        '''
        categories = Category.get_code_name_list()
        sub_categories = SubCategory.get_code_name_list()
        today = date.today()
        curr_date = today.strftime('%y%m%d')
        # Fix: read the counts through a context manager so the read handle
        # is closed instead of leaked, then reopen the file for writing.
        with open(os.path.join(BASE_DIR, 'count_files/thread_id.txt'), 'r') as read_file:
            count_data = read_file.read().split('\n')
        count_file = open(os.path.join(BASE_DIR, 'count_files/thread_id.txt'), 'w')
        # if first line of date does not match with current date
        if curr_date != count_data[0]:
            print( 1 )
            # First thread of the day: reset every counter to 0, except the
            # requested sub-category which starts at 1.
            data = ''
            category_index = 0
            for category_wise in sub_categories:
                for sub_code, sub_cat in category_wise:
                    if sub_cat == sub_category:
                        data += '1 '
                        code = categories[ category_index ][ 0 ] + sub_code
                    else:
                        data += '0 '
                data += '\n'
                category_index += 1
            data = curr_date + '\n' + data
            count_file.write(data)
            count_file.close()
            generated_id = '0'
        else:
            print( 2 )
            # preprocessing of data / conversion into list of counts from string
            for index in range(len(count_data)):
                count_data[index] = count_data[index].split(' ')
            # writes date in first line of the opened count file
            count_file.write(curr_date+'\n')
            print( count_data, 'count_data' )
            # count incrementing part
            cat_index = 1
            for cat_code, cat in categories:
                sub_index = 0
                for sub_cat_code, sub in sub_categories[cat_index-1]:
                    if (sub == sub_category and cat == category):
                        print(sub, cat)
                        try:
                            generated_id = count_data[cat_index][sub_index]
                        except IndexError:
                            # NOTE(review): assigning to a missing index would
                            # itself raise IndexError — confirm the count file
                            # always has a slot per sub-category.
                            count_data[cat_index][sub_index] = '1'
                            generated_id = '0'
                        else:
                            count_data[cat_index][sub_index] = str( int(generated_id) + 1 )
                        # generates code from category, sub_category, required for id
                        code = cat_code + sub_cat_code
                    # writes count for every sub_category in file
                    count_file.write(count_data[cat_index][sub_index]+' ')
                    sub_index += 1
                # creates new line in count file before start iterating for next category
                count_file.write('\n')
                cat_index += 1
            count_file.close()
        # NOTE(review): when the daily count reaches 10 the date/code prefix is
        # silently skipped, and `code` is unbound when no sub-category matched
        # — preserved as-is pending confirmation of the intended id format.
        if int( generated_id ) < 10 :
            generated_id = curr_date + code + generated_id
        print(generated_id, 'id id')
        self.id = generated_id

    def get_thread( request, id_no ):
        ''' Returns Thread with specified id if present or else returns messages and None. '''
        try:
            thread = Thread.objects.get( id = id_no )
        except ObjectDoesNotExist:
            # Fix: the message previously interpolated the *builtin* `id`
            # instead of the requested `id_no`.
            messages.error( request, f' Thread { id_no } does not exist. ' )
            thread = None
        return thread

    def increase_complain_count( self ):
        ''' Increases and saves the count of complaint in Thread model. '''
        self.complain_count += 1
        self.save( update_fields=[ 'complain_count' ] )

    def increase_note_count( self ):
        ''' Increases and saves the count of note in Thread model. '''
        self.note_count += 1
        self.save( update_fields = [ 'note_count' ] )

    def init_for_add( request, member ):
        ''' Initializes new Thread object with data received by post method. '''
        thread = Thread()
        thread.title = request.POST.get( 'title' )
        thread.category = request.POST.get( 'category' )
        thread.sub_category = request.POST.get( 'sub_category' )
        thread.description = request.POST.get( 'description' )
        thread.created_by = member
        return thread

    def init_for_redressal( self, request, member ):
        ''' Initializes the Thread object with the redressal data received through post method. '''
        redressal = Redressal()
        redressal.text = request.POST.get( 'redressal' )
        redressal.file = request.FILES.get( 'redressal_file' )
        redressal.added_by = member
        redressal.added_at = timezone.now()
        self.redressal = redressal

    def is_add_valid( self, request ):
        ''' Validates data initialized by method 'init_for_add' before saving in DB. '''
        valid = True
        if self.title == '' or self.title is None:
            valid = False
        elif self.category == '' or self.category is None or self.category == 'Select Category':
            valid = False
        elif self.sub_category == '' or self.sub_category is None or self.sub_category == 'Select Sub Category':
            valid = False
        elif self.description == '' or self.description is None or self.description == 'Add description here...':
            valid = False
        elif self.created_by is None:
            valid = False
        if valid :
            self.generate_id( self.category, self.sub_category )
        return valid

    def is_redress_valid( self ):
        ''' Returns True if initialized redressal data of Thread object is valid or else returns False. '''
        valid = True
        if self.redressal is None :
            valid = False
        elif self.redressal.text == '' or self.redressal.text is None:
            valid = False
        elif self.redressal.added_by is None:
            valid = False
        elif self.redressal.added_at is None:
            valid = False
        return valid

    def redress( self ):
        ''' Saves initialized redressal data in Thread model. '''
        self.redressal.save()
        self.save( update_fields = [ 'redressal' ] )

    def search( query, search_type ):
        ''' Single function for search of Thread objects.

        Fix: the original called ``search_qs.union( Q(...) )``, which passes
        a Q object where QuerySet.union() expects querysets and also discards
        the returned queryset, so every search came back empty.  A combined
        Q filter is used instead; 'All' matches any searchable field.
        '''
        q_filter = Q()
        matched = False
        if search_type in ( Thread.SEARCH_TYPES[0], Thread.SEARCH_TYPES[1] ) :
            q_filter |= Q( title__icontains = query )
            matched = True
        if search_type in ( Thread.SEARCH_TYPES[0], Thread.SEARCH_TYPES[2] ) :
            q_filter |= Q( description__icontains = query )
            matched = True
        if search_type in ( Thread.SEARCH_TYPES[0], Thread.SEARCH_TYPES[3] ) :
            q_filter |= ( Q( created_by__mid__icontains = query ) |
                          Q( created_by__user__first_name__icontains = query ) |
                          Q( created_by__user__last_name__icontains = query ) )
            matched = True
        if not matched :
            # Unknown search type: keep the original "no results" behaviour.
            return Thread.objects.none()
        return Thread.objects.filter( q_filter )

    def filter_qs( queryset, filter_option ) :
        ''' Filters the passed queryset according to passed filter_option. '''
        print( filter_option, queryset )
        # Fix: default to the unfiltered queryset so the 'All' option (and any
        # unknown option) no longer raises UnboundLocalError.
        final_qs = queryset
        if filter_option == Thread.FILTER_OPTIONS[ 1 ] :
            final_qs = queryset.exclude( redressal = None ).filter( redressal__action = 'APPROVE' )
            print(1)
        elif filter_option == Thread.FILTER_OPTIONS[ 2 ] :
            print(2)
            final_qs = queryset.exclude( redressal = None )
        elif filter_option == Thread.FILTER_OPTIONS[ 3 ] :
            final_qs = queryset.exclude( redressal = None ).filter( redressal__action = 'REJECT' )
            print(3)
        elif filter_option == Thread.FILTER_OPTIONS[ 4 ] :
            print(4)
            final_qs = queryset.filter( redressal = None )
        print( final_qs )
        return final_qs
|
[
"hkaranjule77@gmail.com"
] |
hkaranjule77@gmail.com
|
c56f4e4fb4ccc6672145c456c1c1d50ffbfd1d54
|
eb91c2d2560a3e4ce35ebc2d6550f001579c03c5
|
/codeforces/1353/B.py
|
9f295a1f5fb9c4df31d57960b9fb7930b9a23708
|
[] |
no_license
|
kamojiro/codeforces
|
0a3a40c8cab96a0257bb9d6ed53de217192cbabb
|
9e66297fa3025ba6731111ab855096d579e86c67
|
refs/heads/master
| 2021-07-07T11:47:42.373189
| 2020-08-15T14:45:36
| 2020-08-15T14:45:36
| 176,296,160
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 522
|
py
|
#import sys
#input = sys.stdin.readline
def main():
    """Codeforces 1353B: for each test case, greedily swap the k smallest
    elements of a for the largest elements of b when that increases the sum,
    and print the maximal total of a."""
    cases = int(input())
    for _ in range(cases):
        n, k = (int(v) for v in input().split())
        a = sorted(int(v) for v in input().split())
        b = sorted((int(v) for v in input().split()), reverse=True)
        total = 0
        for i in range(n):
            # Within the first k positions, take b's element when it is larger.
            if i < k and a[i] < b[i]:
                total += b[i]
            else:
                total += a[i]
        print(total)


if __name__ == '__main__':
    main()
|
[
"tamagoma002@yahoo.co.jp"
] |
tamagoma002@yahoo.co.jp
|
d67cb6277c5180abb77f9cba3af67d2bd99dc8a0
|
ef66e297a49d04098d98a711ca3fda7b8a9a657c
|
/Python/9.2.2 use unbound superclass construction method.py
|
74daaf9ea6794b77af6bf03a25c5d3f357fe4796
|
[] |
no_license
|
breezy1812/MyCodes
|
34940357954dad35ddcf39aa6c9bc9e5cd1748eb
|
9e3d117d17025b3b587c5a80638cb8b3de754195
|
refs/heads/master
| 2020-07-19T13:36:05.270908
| 2018-12-15T08:54:30
| 2018-12-15T08:54:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 327
|
py
|
class Bird:
    """Base class (Python 2): a bird is hungry until it eats."""
    def __init__(self):
        # hungry tracks whether the next eat() call actually feeds the bird
        self.hungry=True
    def eat(self):
        """Eat if hungry; a repeat call without renewed hunger declines."""
        if self.hungry:
            print 'Aaaah...'
            self.hungry=False
        else:
            print 'No,thanks!'
class SongBird(Bird):
    """Subclass demonstrating the Python 2 *unbound* superclass constructor
    call, so Bird.__init__ still runs and sets the hungry flag."""
    def __init__(self):
        # Unbound call: explicitly pass self to the superclass initializer.
        Bird.__init__(self)
        self.sound='Squawk!'
    def sing(self):
        print self.sound
# Demo: sing once, then eat twice — the second eat() prints the refusal,
# showing that Bird.__init__ ran and the hungry flag toggles.
sb=SongBird()
sb.sing()
sb.eat()
sb.eat()
|
[
"449195172@qq.com"
] |
449195172@qq.com
|
2b612f6eea0c6ac37a27d2d8fb6083285ff16073
|
19bc4d44dc7303e23a6949b1bc7b98b65bcf80e9
|
/python/Linear Regression in Python/Simple Linear Regression/Linear Regression at Codecademy/script.py
|
661d035628a95c8b252a74e85e4a4024c02fe7a9
|
[] |
no_license
|
henry1034/Challenge-Project-of-CodeCademy
|
c66190ff3a318e22f263fcf78344632773065c24
|
61ebe84696cec120393acca62b4fce4bdea0fb30
|
refs/heads/master
| 2023-07-04T01:04:16.978374
| 2021-07-29T17:27:56
| 2021-07-29T17:27:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,583
|
py
|
# Simple linear regression walkthrough on the Codecademy learner dataset:
# fit score ~ completed, inspect assumptions, then fit score ~ lesson and
# compare against the group means.
# Load libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import statsmodels.api as sm
import codecademylib3
# Read in the data
codecademy = pd.read_csv('codecademy.csv')
# Print the first five rows
print(codecademy.head())
# Create a scatter plot of score vs completed
plt.scatter(codecademy.completed, codecademy.score)
# Show then clear plot
plt.show()
plt.clf()
# Fit a linear regression to predict score based on prior lessons completed
model = sm.OLS.from_formula(
    "score ~ completed",
    data = codecademy
)
result = model.fit()
print(result.params)
# Intercept interpretation:
print("A learner who has previously completed 0 content items is expected to earn a quiz score of 13.2 points.")
# Slope interpretation:
print("Students who have completed one additional prior content item are expected to score 1.3 points higher on the quiz.")
# Plot the scatter plot with the line on top
plt.scatter(codecademy.completed, codecademy.score)
plt.plot(codecademy.completed, result.predict(codecademy))
# Show then clear plot
plt.show()
plt.clf()
# Predict score for learner who has completed 20 prior lessons
print(result.predict({'completed':[20]}))
# NOTE(review): positional indexing assumes params is [Intercept, completed];
# result.params['Intercept'] / ['completed'] would be more explicit.
intercept = result.params[0]
slope = result.params[1]
print(slope * 20 + intercept)
# Calculate fitted values
fitted_values = result.predict(codecademy)
# Calculate residuals
residuals = codecademy.score - fitted_values
# Check normality assumption
plt.hist(residuals)
# Show then clear the plot
plt.show()
plt.clf()
# Check homoscedasticity assumption
plt.scatter(fitted_values, residuals)
# Show then clear the plot
plt.show()
plt.clf()
# Create a boxplot of score vs lesson
sns.boxplot(
    data = codecademy,
    x = "lesson",
    y = "score"
)
# Show then clear plot
plt.show()
plt.clf()
# Fit a linear regression to predict score based on which lesson they took
model = sm.OLS.from_formula(
    "score ~ lesson",
    data = codecademy
)
result = model.fit()
print(result.params)
# Calculate and print the group means and mean difference (for comparison)
mean_score_lessonA = np.mean(codecademy.score[codecademy.lesson == 'Lesson A'])
mean_score_lessonB = np.mean(codecademy.score[codecademy.lesson == 'Lesson B'])
print('Mean score (A): ', mean_score_lessonA)
print('Mean score (B): ', mean_score_lessonB)
print('Mean score difference: ', mean_score_lessonA - mean_score_lessonB)
# Use `sns.lmplot()` to plot `score` vs. `completed` colored by `lesson`
sns.lmplot(
    x = "completed",
    y = "score",
    hue = "lesson",
    data = codecademy
)
plt.show()
plt.clf()
|
[
"noreply@github.com"
] |
noreply@github.com
|
e2e11b153277bbe7211ce627527473ce902b023d
|
f9a882428ba58da780b3a89ce9186faff8a0143c
|
/Hackerrank/Bill Division.py
|
4293a7671d28383cd70ba5dfb2c127d136c5ebac
|
[] |
no_license
|
Samyak2607/CompetitiveProgramming
|
5eaf4e616f32f457833a262701a6f8b2dca4c5d5
|
db1767e0ac9b53b69ed8034ef265c8a14b0b9717
|
refs/heads/master
| 2021-06-19T19:37:37.139347
| 2021-03-28T14:10:13
| 2021-03-28T14:10:13
| 196,148,649
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 319
|
py
|
def bonAppetit(bill, k, b):
    """Return 'Bon Appetit' when the charged amount b is fair, otherwise the
    amount owed back.  The bill is split in half, excluding item bill[k]
    which only one person ate."""
    shared_total = sum(bill) - bill[k]
    fair_share = shared_total // 2
    if fair_share == b:
        return 'Bon Appetit'
    return abs(fair_share - b)
# Driver: first line gives the number of test cases; each case supplies
# n and k, the bill items, and the amount Anna was charged.
for _ in range(int(input())):
    n,k=map(int,input().split())
    bill=list(map(int,input().split()))
    b=int(input())
    res=bonAppetit(bill, k, b)
    print(res)
|
[
"samyakjain0888@gmail.com"
] |
samyakjain0888@gmail.com
|
66589b10ae7ca9569471c53354d4b22dedbf450e
|
4427be17604a2b25e3ed6ce643cecabf6d7b5516
|
/2021/day13.py
|
e87b4cd3387af619c5d3057ae91f6623f8b1cbc6
|
[] |
no_license
|
shuckc/adventofcode
|
778d26ed5b5112cf942e1ed15bd057f190f9bb84
|
9f9a486b9b81d74dae1c5cae43b975b1d4e618ff
|
refs/heads/master
| 2021-12-14T20:52:20.501413
| 2021-12-14T14:07:09
| 2021-12-14T14:07:09
| 47,255,375
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,886
|
py
|
import numpy as np
eg = """6,10
0,14
9,10
0,3
10,4
4,11
6,0
6,12
4,1
0,13
10,12
3,4
3,0
8,4
1,10
2,14
8,10
9,0
fold along y=7
fold along x=5
"""
def parse(eg):
    """Split puzzle input into a dot grid (rows = y, cols = x) and a list of
    (axis, position) fold instructions."""
    coord_part, fold_part = eg.split("\n\n")
    coords = np.fromiter(coord_part.replace("\n", ",").split(","), int).reshape([-1, 2])
    # Allocate as [x][y] first; the grid is transposed to [y][x] on return.
    grid = np.zeros(shape=1 + np.amax(coords, axis=0), dtype=int)
    xs, ys = np.transpose(coords)
    grid[xs, ys] = 1
    # 'fold along y=7' -> ('y', 7): char 11 is the axis, chars 13+ the position.
    folds = []
    for line in fold_part.split('\n'):
        if line:
            folds.append((line[11], int(line[13:])))
    print(folds)
    return np.transpose(grid), folds
def fold(grid, instructions, limit=10**6):
    """Apply up to ``limit`` fold instructions to *grid* (rows = y, cols = x),
    printing the visible dot count after each fold, and return the result.

    Bug fix: the default limit was written ``10^6`` — the ``^`` operator is
    XOR, so it evaluated to 12, not the intended one million (``10**6``).
    """
    for i, (direction, pos) in enumerate(instructions):
        if direction == 'y':
            # Fold the bottom half up onto the top half.
            top = grid[0:pos, :]
            bot = np.flipud(grid[pos+1:, :])
            grid = top + bot
        else:
            # Fold the right half left onto the left half.
            left = grid[:, 0:pos]
            right = np.fliplr(grid[:, pos+1:])
            grid = left + right
        # Overlapping dots still count as a single dot.
        grid[grid > 1] = 1
        print("{} dots visible".format(np.sum(grid)))
        if i >= limit:
            break
    return(grid)
# Run the worked example, then the real input: the limit=1 call prints the
# dot count after the first folds (part 1); the unlimited call applies every
# fold so the final grid spells out the part-2 letters.
# NOTE(review): the inline open() handles are never explicitly closed;
# consider `with open(...)` or pathlib's read_text().
fold(*parse(eg))
fold(*parse(open('input/day13.txt').read()), limit=1)
g = fold(*parse(open('input/day13.txt').read()))
with np.printoptions(threshold=np.inf):
    print(g)
# [[1 1 1 1 0 1 1 1 1 0 1 0 0 0 0 1 1 1 1 0 0 0 1 1 0 0 1 1 0 0 1 1 1 0 0 1 1 1 1 0]
# [1 0 0 0 0 1 0 0 0 0 1 0 0 0 0 1 0 0 0 0 0 0 0 1 0 1 0 0 1 0 1 0 0 1 0 1 0 0 0 0]
# [1 1 1 0 0 1 1 1 0 0 1 0 0 0 0 1 1 1 0 0 0 0 0 1 0 1 0 0 0 0 1 0 0 1 0 1 1 1 0 0]
# [1 0 0 0 0 1 0 0 0 0 1 0 0 0 0 1 0 0 0 0 0 0 0 1 0 1 0 1 1 0 1 1 1 0 0 1 0 0 0 0]
# [1 0 0 0 0 1 0 0 0 0 1 0 0 0 0 1 0 0 0 0 1 0 0 1 0 1 0 0 1 0 1 0 1 0 0 1 0 0 0 0]
# [1 1 1 1 0 1 0 0 0 0 1 1 1 1 0 1 0 0 0 0 0 1 1 0 0 0 1 1 1 0 1 0 0 1 0 1 0 0 0 0]]
# EFLFJGRF
|
[
"chris@shucksmith.co.uk"
] |
chris@shucksmith.co.uk
|
94a836f98274030034fc1d71f9ea205e92cb8242
|
9c8c8ae3842ec9a6f36730234c02f93f71ebda20
|
/vndk/tools/sourcedr/ninja/list_installed_module_under_source.py
|
3643e9d57df15529f03701ae39cfbbabc54bc9a2
|
[
"Apache-2.0"
] |
permissive
|
batyrf/platform_development
|
437bc6560a062d0ce7b27bab17b78109a72b1773
|
d4f7efc0c58598e3fc02a1e4fe8e751bd4ae8f0a
|
refs/heads/master
| 2020-12-26T18:37:29.529464
| 2020-02-01T04:54:27
| 2020-02-01T04:54:27
| 237,598,759
| 3
| 0
| null | 2020-02-01T10:35:07
| 2020-02-01T10:35:06
| null |
UTF-8
|
Python
| false
| false
| 2,232
|
py
|
#!/usr/bin/env python3
import argparse
import itertools
import json
import posixpath
import re
def match_any(regex, iterable):
    """Return True when at least one element of iterable matches regex."""
    for element in iterable:
        if regex.match(element):
            return True
    return False
class ModuleInfo(object):
    """Thin wrapper over a parsed module-info.json (module name -> info dict)."""

    def __init__(self, module_info_path):
        """Load and parse the JSON file at module_info_path."""
        with open(module_info_path, 'r') as json_file:
            self._json = json.load(json_file)

    def list(self, installed_filter=None, module_definition_filter=None):
        """Yield (installed_path, definition_path) pairs.

        A module contributes its full cross product of installed paths and
        definition paths only when some installed path matches
        installed_filter and some definition path matches
        module_definition_filter (both filters are optional compiled regexes).
        """
        for info in self._json.values():
            installs = info['installed']
            paths = info['path']
            if installed_filter and not any(
                    installed_filter.match(p) for p in installs):
                continue
            if module_definition_filter and not any(
                    module_definition_filter.match(p) for p in paths):
                continue
            for install in installs:
                for path in paths:
                    yield (install, path)
def _parse_args():
    """Build the command line parser and parse sys.argv into a Namespace."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('module_info', help='Path to module-info.json')
    arg_parser.add_argument('--out-dir', default='out',
                            help='Android build output directory')
    arg_parser.add_argument('--installed-filter',
                            help='Installation filter (regular expression)')
    arg_parser.add_argument('--module-definition-filter',
                            help='Module definition filter (regular expression)')
    return arg_parser.parse_args()
def main():
    """List (installed_file, module_path) pairs from module-info.json,
    optionally restricted by the regex filters given on the command line."""
    opts = _parse_args()

    install_re = None
    if opts.installed_filter:
        # Anchor the user's pattern under the normalized output directory.
        out_prefix = re.escape(posixpath.normpath(opts.out_dir))
        install_re = re.compile('%s/(?:%s)' % (out_prefix, opts.installed_filter))

    path_re = (re.compile(opts.module_definition_filter)
               if opts.module_definition_filter else None)

    for installed_file, module_path in \
            ModuleInfo(opts.module_info).list(install_re, path_re):
        print(installed_file, module_path)


if __name__ == '__main__':
    main()
|
[
"loganchien@google.com"
] |
loganchien@google.com
|
5c4b6a31a7e7502af119aafb08459ae1c81c95a7
|
44c30405bf6df4f42bc0fcc04a0c3c72a6a42128
|
/src/Functions.py
|
edc4a0ff66b62a674f218e9a19ddc654883af84b
|
[] |
no_license
|
sgh1/kindablog
|
4e97673d413b5b200fe17353142e0315cacf9db5
|
72aa6843776f8c619b5a0bb570a5c1466ad6c3b6
|
refs/heads/master
| 2021-09-06T08:03:24.106495
| 2018-02-04T04:25:31
| 2018-02-04T04:25:31
| 113,723,808
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,210
|
py
|
#
# Functions.py
# Some helper functions to keep clutter out of Main.py.
#
# Imports
import Settings
import markdown2
from markdown2Mathjax import sanitizeInput, reconstructMath
import os.path
import pprint
import web
class Functions(object):
    """Static helper routines used by Main.py: related-post lookup, markdown
    rendering, and web-root file reads."""

    @staticmethod
    def GetRelatedPostsByTag(allMetaDataDict, tags):
        """
        Get related posts by tags.
        @param allMetaDataDict: Meta data from the meta pickle.
        @param tags: Dictionary of tags for which we want related articles.
        @return: Dictionary of filenames : titles of related articles.
        """
        related = {}
        maxArticles = Settings.Settings.relatedArticleListSize
        for tag in tags:
            # The meta data is not guaranteed to be refreshed, so a tag may
            # legitimately be missing; skip it rather than fail.
            if tag not in allMetaDataDict["byTag"]:
                continue
            for fileName in allMetaDataDict["byTag"][tag]:
                related[fileName] = allMetaDataDict["byTitle"][fileName]
                # Stop as soon as we have collected enough related articles.
                if len(related) >= maxArticles:
                    break
            if len(related) >= maxArticles:
                break
        return related

    @staticmethod
    def CreateMarkdownFromText(text):
        """
        Create markdown from raw text that was read from file.
        @param text: Raw text from .md file.
        @return: HTML string containing processed markdown text, and metadata from .md file.
        """
        # Protect math segments before markdown processing (the upstream
        # sanitize idiom is known to be flaky).
        sanitized = sanitizeInput(text)
        html = markdown2.markdown(sanitized[0], extras=["fenced-code-blocks", "metadata"])
        meta = html.metadata
        # Re-insert the protected math segments into the rendered output.
        return (reconstructMath(html, sanitized[1]), meta)

    @staticmethod
    def ReadFile(pageName):
        """
        Reads a file and returns contents of that file. If the file is not found, throw a 404.
        @param pageName: File to read with respect to web-root.
        @return: Contents of file.
        """
        fullPath = Settings.Settings.webRoot + "/" + pageName
        if not os.path.exists(fullPath):
            raise web.notfound()
        with open(fullPath, 'r') as pageFile:
            return pageFile.read()
|
[
"ec2-user@ip-172-31-41-216.us-west-2.compute.internal"
] |
ec2-user@ip-172-31-41-216.us-west-2.compute.internal
|
d1a8b5e6c73dcc106af349846064f07e4d3a17f1
|
1716265268aae72b4f8842aa31c7ef64c44668b7
|
/apps/rss/fs_script/PCARI_RSS.py
|
1a1eec0e2101963b6c1d8413a9456abc2fc2a80e
|
[
"BSD-3-Clause"
] |
permissive
|
pcarivbts/vbts-clientfiles
|
e9f4e7df46de1bc57a9bc2093056bdc9ea17dfff
|
3b5c5a959fcf4a2dbed30c348951ed213f4ae786
|
refs/heads/master
| 2020-03-11T07:29:25.297695
| 2020-02-26T02:49:28
| 2020-02-26T02:49:28
| 91,312,998
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,548
|
py
|
"""
Copyright (c) 2015-present, Philippine-California Advanced Research Institutes-
The Village Base Station Project (PCARI-VBTS). All rights reserved.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
"""
from freeswitch import consoleLog
from pcarirss import rss
import requests
import json
def usage():
    """Generic fallback message; detailed errors are handled by dictionary.py."""
    return ("Unable to process your request at the moment."
            " Please try again later.")
def parse(args):
    """Split a pipe-delimited argument string into a (keyword, action) pair.

    Always returns a 2-tuple so callers can unpack it unconditionally:
    - empty/missing input or more than two fields -> (None, None)
    - a lone keyword -> (keyword, None)   [the original returned a 1-element
      list here, which crashed the caller's 2-value unpack]
    - "keyword|action" -> (keyword, action)
    """
    parts = args.split('|') if args else []
    if not parts or len(parts) > 2:
        return None, None
    if len(parts) == 1:
        # Pad with None so `keyword, action = parse(...)` always works.
        return parts[0], None
    return parts[0], parts[1]
def get_output(args):
    """Look up the service matching the keyword and run the requested action.

    Queries the local service API for the keyword's service record, builds an
    Rss handler from it, and returns the handler's result for the action.
    """
    keyword, action = parse(args)
    service_api = 'http://127.0.0.1:7000/api/service/'
    response = requests.get(service_api, params={'keyword': keyword})
    service = json.loads(response.text)
    handler = rss.Rss(service['name'], keyword, service['number'],
                      service['script_arguments'])
    return handler.run(action)
def chat(message, args):
    """FreeSWITCH chat entry point: run the request and set _result on the message."""
    result = get_output(args)
    if not result:
        consoleLog("info", usage())
        return
    consoleLog("info", "Returned Chat: " + str(result) + "\n")
    message.chat_execute("set", "_result=%s" % str(result))
def fsapi(session, stream, env, args):
    """FreeSWITCH API entry point: run the request and write the result to the stream."""
    result = get_output(args)
    if not result:
        stream.write(usage())
        return
    consoleLog("info", "Returned FSAPI: " + str(result) + "\n")
    stream.write(str(result))
|
[
"pcari.vbts@gmail.com"
] |
pcari.vbts@gmail.com
|
117840b7f746720a7d129a75f2f7cde774ba515a
|
6b623a03ebb88e48a8e9f0528317ba28e8298e08
|
/node_modules/mongodb/node_modules/bson/build/config.gypi
|
91ce77280c69123cf9bbbb5b50e294bcc78a8108
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
AlexanderPolus/ixd-Delphi-official
|
19ea568344f4151c6beb28f7ee989bb8b00a66ad
|
77bac752542d3cc8a66cc6b89cf90c096f2a5b8d
|
refs/heads/master
| 2020-04-23T01:32:54.504608
| 2019-03-15T02:36:07
| 2019-03-15T02:36:07
| 169,649,114
| 2
| 1
|
MIT
| 2019-02-07T22:13:46
| 2019-02-07T21:57:42
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 3,028
|
gypi
|
# Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"clang": 0,
"gcc_version": 46,
"host_arch": "ia32",
"node_install_npm": "true",
"node_prefix": "/usr/local",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_v8": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_unsafe_optimizations": 0,
"node_use_dtrace": "false",
"node_use_etw": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"node_use_systemtap": "false",
"python": "/usr/bin/python",
"target_arch": "ia32",
"v8_enable_gdbjit": 0,
"v8_no_strict_aliasing": 1,
"v8_use_snapshot": "true",
"nodedir": "/home/vagrant/.node-gyp/0.10.24",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"cache_lock_stale": "60000",
"sign_git_tag": "",
"always_auth": "",
"user_agent": "node/v0.10.24 linux ia32",
"bin_links": "true",
"key": "",
"description": "true",
"fetch_retries": "2",
"heading": "npm",
"user": "",
"force": "",
"cache_min": "10",
"init_license": "ISC",
"editor": "vi",
"rollback": "true",
"cache_max": "null",
"userconfig": "/home/vagrant/.npmrc",
"engine_strict": "",
"init_author_name": "",
"init_author_url": "",
"tmp": "/home/vagrant/tmp",
"depth": "null",
"save_dev": "",
"usage": "",
"https_proxy": "",
"onload_script": "",
"rebuild_bundle": "true",
"save_bundle": "",
"shell": "/bin/bash",
"prefix": "/usr/local",
"registry": "https://registry.npmjs.org/",
"browser": "",
"cache_lock_wait": "10000",
"save_optional": "",
"searchopts": "",
"versions": "",
"cache": "/home/vagrant/.npm",
"ignore_scripts": "",
"searchsort": "name",
"version": "",
"local_address": "",
"viewer": "man",
"color": "true",
"fetch_retry_mintimeout": "10000",
"umask": "18",
"fetch_retry_maxtimeout": "60000",
"message": "%s",
"cert": "",
"global": "",
"link": "",
"save": "",
"unicode": "true",
"long": "",
"production": "",
"unsafe_perm": "true",
"node_version": "v0.10.24",
"tag": "latest",
"git_tag_version": "true",
"shrinkwrap": "true",
"fetch_retry_factor": "10",
"npat": "",
"proprietary_attribs": "true",
"strict_ssl": "true",
"username": "",
"dev": "",
"globalconfig": "/usr/local/etc/npmrc",
"init_module": "/home/vagrant/.npm-init.js",
"parseable": "",
"globalignorefile": "/usr/local/etc/npmignore",
"cache_lock_retries": "10",
"group": "1000",
"init_author_email": "",
"searchexclude": "",
"git": "git",
"optional": "true",
"email": "",
"json": ""
}
}
|
[
"cyang@godaddy.com"
] |
cyang@godaddy.com
|
8bb84a72bbd6de13b5d9c6549dbf73970d2d872b
|
b63f48ff34abc540880bdd80796d087cb3760b03
|
/sandstone/lib/tests/python/test_app.py
|
e5c9acb88a10280bbea8db8eb49d62d1b3074460
|
[
"MIT"
] |
permissive
|
saurabhsood91/sandstone-ide
|
8ed272ee16c5597a3a916205c63848e487dc38ae
|
25b666c43d48163bb21c628129e118429788eb51
|
refs/heads/master
| 2021-01-11T07:57:52.736877
| 2016-10-25T18:45:41
| 2016-10-25T18:45:41
| 72,131,628
| 0
| 0
| null | 2016-10-27T17:26:17
| 2016-10-27T17:26:17
| null |
UTF-8
|
Python
| false
| false
| 2,346
|
py
|
import unittest
import mock
import os
from tornado.web import StaticFileHandler
import sandstone
from sandstone.app import SandstoneApplication
from sandstone.lib import ui_methods
from sandstone.lib.handlers.main import MainHandler
from sandstone.lib.handlers.pam_auth import PAMLoginHandler
from sandstone import settings as default_settings
# Apps enabled for these tests; mirrors sandstone's INSTALLED_APPS setting.
INSTALLED_APPS = (
    'sandstone.lib',
    'sandstone.apps.codeeditor',
    'sandstone.apps.filebrowser',
)
# Collect each test app's APP_SPECIFICATION object from its settings module.
APP_SPECS = []
for mod_name in ['sandstone.apps.codeeditor.settings','sandstone.apps.filebrowser.settings']:
    mod = __import__(mod_name,fromlist=[''])
    APP_SPECS.append(mod.APP_SPECIFICATION)
class MainAppTestCase(unittest.TestCase):
    """Tests for SandstoneApplication's Tornado settings and handler wiring."""

    @mock.patch('sandstone.settings.INSTALLED_APPS',INSTALLED_APPS)
    @mock.patch('sandstone.settings.APP_SPECIFICATIONS',APP_SPECS)
    def setUp(self):
        # Patch the installed-app list and specs for the duration of setUp:
        # the application builds its handler table during __init__.
        self.app = SandstoneApplication()

    def test_app_settings(self):
        # The Tornado settings dict must carry sandstone's expected defaults.
        self.assertEqual(type(self.app.settings),type({}))
        expd = dict(
            project_dir=sandstone.__path__[0],
            static_dir=os.path.join(sandstone.__path__[0],'client/sandstone'),
            login_url=default_settings.LOGIN_URL,
            cookie_secret = default_settings.COOKIE_SECRET,
            debug = default_settings.DEBUG,
            xsrf_cookies=True,
            ui_methods=ui_methods,
        )
        self.assertDictContainsSubset(expd,self.app.settings)

    def test_app_handlers(self):
        # Core static route must come first; auth and per-app routes must
        # all be registered with the expected handler classes.
        handlers = self.app.handlers[0][1]
        hpaths = [h._path for h in handlers]
        self.assertEqual(handlers[0]._path,'/static/core/%s')
        self.assertTrue(issubclass(handlers[0].handler_class,StaticFileHandler))
        self.assertTrue('/' in hpaths)
        i = hpaths.index('/')
        self.assertTrue(issubclass(handlers[i].handler_class,MainHandler))
        self.assertTrue('/auth/login' in hpaths)
        i = hpaths.index('/auth/login')
        self.assertTrue(issubclass(handlers[i].handler_class,PAMLoginHandler))
        self.assertTrue('/auth/logout' in hpaths)
        self.assertTrue('/a/deps' in hpaths)
        self.assertTrue('/static/editor/%s' in hpaths)
        self.assertTrue('/static/filebrowser/%s' in hpaths)
        self.assertTrue('/filebrowser/localfiles%s' in hpaths)
        self.assertTrue('/filebrowser/filetree/a/dir' in hpaths)
|
[
"sampedro@colorado.edu"
] |
sampedro@colorado.edu
|
950e9fce4dcbd3c0bc732cdc70d82b7bb4f0e7c3
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/domain/AlipayIserviceIsresourceTenantquerybytntidQueryModel.py
|
bf348d94e07635b10d4f588191dab57c1660c589
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 1,400
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayIserviceIsresourceTenantquerybytntidQueryModel(object):
    """Query model for the Alipay iservice tenant-by-tntid API.

    Carries two optional string fields: tnt_inst_id and ur_id.
    """

    # Field names used by the generic (de)serialization loops below.
    _FIELDS = ('tnt_inst_id', 'ur_id')

    def __init__(self):
        self._tnt_inst_id = None
        self._ur_id = None

    @property
    def tnt_inst_id(self):
        return self._tnt_inst_id

    @tnt_inst_id.setter
    def tnt_inst_id(self, value):
        self._tnt_inst_id = value

    @property
    def ur_id(self):
        return self._ur_id

    @ur_id.setter
    def ur_id(self, value):
        self._ur_id = value

    def to_alipay_dict(self):
        """Serialize truthy fields to a dict, recursing into nested API objects."""
        params = {}
        for key in self._FIELDS:
            value = getattr(self, key)
            if not value:
                continue
            if hasattr(value, 'to_alipay_dict'):
                value = value.to_alipay_dict()
            params[key] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from an API response dict; falsy input yields None."""
        if not d:
            return None
        model = AlipayIserviceIsresourceTenantquerybytntidQueryModel()
        for key in AlipayIserviceIsresourceTenantquerybytntidQueryModel._FIELDS:
            if key in d:
                setattr(model, key, d[key])
        return model
|
[
"jishupei.jsp@alibaba-inc.com"
] |
jishupei.jsp@alibaba-inc.com
|
ba05627578638cfac87bf13a82cc59a5f675c8c8
|
bc6b969ba9c75d4b7c9d7edf47e2d0ac90dcd972
|
/python/stl.py
|
40e85ed0d241bc8e8ebe2a2ad67c3d53a2e2c3b2
|
[] |
no_license
|
jeoninhyuk/TIL
|
6b800d1c92643d118bf9627535d11c7679ee555d
|
d3c7927d456250369049fba6781e52c5cbe9ecfd
|
refs/heads/master
| 2020-05-27T18:55:01.415366
| 2019-06-21T05:43:37
| 2019-06-21T05:43:37
| 188,750,521
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 413
|
py
|
# String Interpolation
#a = '123'
#new_a = f'{a}'
# 1. 옛날 방식
#'%s %s' % ('one','two') #=> 'one two'
# 2. pyformat
#'{} {}'.format('one','two') #=> 'one two'
# name = '홍길동'
# eng_name = 'Hong'
#
# print('안녕하세요,{}입니다. my nme is {1}'.format(name, eng_name)
# 3. f-string
#a, b = 'one', two
#f'{a} {b}' #=> 'one two'
# name = '홍길공'
# print(f'안녕하셀',{name}입니다.')
|
[
"jeoninhyuk94@naver.com"
] |
jeoninhyuk94@naver.com
|
1c896176c54b9dd628a492f4f4369079f41e5962
|
ccbbc82efab60f11decd7632e30e79562e0f7dda
|
/hri_api/src/hri_api/util/robot_config_parser.py
|
02bc242d7886ddedad3f2d4f7ed458aa829e0c95
|
[
"BSD-3-Clause"
] |
permissive
|
georgepar/hri
|
b26a0a2c5739017c6aa204e5b14e5f19295f0f2a
|
60e2713c0106a5ff05532f4c27eecb4fbf9f1e24
|
refs/heads/master
| 2021-01-22T06:02:21.014837
| 2014-10-19T11:04:35
| 2014-10-19T11:04:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 836
|
py
|
#!/usr/bin/env python
import roslib
roslib.load_manifest('hri_api')
import yaml
class RobotConfigParser(object):
    """Read robot configuration values from a YAML config file.

    The file is expected to contain a top-level 'robot' mapping with
    'type', 'gestures' and 'facial_expressions' keys.
    """

    @staticmethod
    def _load_section(config_file_path, key):
        # Shared loader: parse the YAML file and return config['robot'][key].
        # safe_load replaces the original bare yaml.load, which is deprecated
        # and can construct arbitrary Python objects from untrusted files.
        with open(config_file_path, 'r') as config_file:
            config = yaml.safe_load(config_file)
        return config['robot'][key]

    @staticmethod
    def load_robot_type(config_file_path):
        """Return the robot's type string."""
        return RobotConfigParser._load_section(config_file_path, 'type')

    @staticmethod
    def load_gestures(config_file_path):
        """Return the configured gesture definitions."""
        return RobotConfigParser._load_section(config_file_path, 'gestures')

    @staticmethod
    def load_facial_expressions(config_file_path):
        """Return the configured facial expression definitions."""
        return RobotConfigParser._load_section(config_file_path, 'facial_expressions')
|
[
"jdip004@aucklanduni.ac.nz"
] |
jdip004@aucklanduni.ac.nz
|
a2bb3774baf6676fafd2e71ab388e0f6d6320176
|
68cce04ffbdd0840cd86aaf73551bd58dff016df
|
/RandomGenerator.py
|
2ba633955452201361a142abcb4f1b6e74f4bb80
|
[] |
no_license
|
ignaciocaramuto/TP1_Simulacion
|
ff9d22a7412808a022a1893cc064c823afff737b
|
9829d449ec2268858b5f38e64a14d08e3d9dcdeb
|
refs/heads/master
| 2022-07-29T02:27:46.410114
| 2020-05-12T21:53:44
| 2020-05-12T21:53:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 776
|
py
|
import pylab as plt
import numpy as np
# LCG Implementation
# LCG Implementation
def lcg(a, c, m, initial_seed):
    """Yield an endless linear-congruential stream: x' = (a*x + c) mod m."""
    state = initial_seed
    while True:
        state = (a * state + c) % m
        yield state


def random_sample(n, interval, seed = 20200420162000):
    """Return n pseudo-random ints drawn from [interval[0], interval[1]].

    Each raw LCG value is scaled from [0, 2**32 - 1] into the half-open
    range [lower, upper+1) and truncated to an int.
    """
    lower, upper = interval[0], interval[1] + 1
    stream = lcg(7**5, 5**7, 2**32, seed)
    scale = (2**32) - 1
    return [int((upper - lower) * (next(stream) / scale) + lower)
            for _ in range(n)]
# Visualize samples from the custom LCG next to numpy's PRNG so their
# spatial randomness can be compared by eye.
# NOTE(review): range(501) produces 501 rows of 500 samples, one more row
# than the 500x500 numpy comparison image — confirm whether intended.
Z = []
for i in range(501):
    Z.append(random_sample(500, [0, 1]))
plt.imshow(Z, cmap='gray', interpolation='nearest')
plt.show()
U = np.random.random((500, 500)) # Test data
plt.imshow(U, cmap='gray', interpolation='nearest')
plt.show()
|
[
"pereyramartinn@gmail.com"
] |
pereyramartinn@gmail.com
|
089ad9072c71628b13f8d6e395b39dd3d4ce9127
|
9048a6c949b873091142d0e1f6360fcfd3b2ab4e
|
/recharge.py
|
c92b2db5eefb0d440f4fe8fa212599065653c665
|
[] |
no_license
|
ayush5716/schoolproject
|
525cd0e7bb6e837c52fbbdec258e68e79620a290
|
78a012a94f7419ffa80d72873f05b0771f658657
|
refs/heads/master
| 2022-11-30T22:51:14.956445
| 2020-07-28T19:17:11
| 2020-07-28T19:17:11
| 283,304,268
| 0
| 0
| null | 2020-07-28T19:17:12
| 2020-07-28T19:10:08
| null |
UTF-8
|
Python
| false
| false
| 2,201
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 24 16:32:38 2020
@author: ayush class 12B
"""
import csv
# Write the CSV header row for the recharge log.
# NOTE(review): mode 'a' means this appends a fresh header on every run, so
# repeated executions leave duplicate header rows in RECHARGE.csv — confirm
# whether that is intended.
with open('RECHARGE.csv','a',newline='') as afile:
    write=csv.writer(afile,lineterminator='\n')
    write.writerow(["Customer_Mobile","Recharge_Date","Recharge_Amount","Service_provider"])
def enter():
    """Append the current recharge record to RECHARGE.csv.

    Reads Customer_Mobile, Recharge_Date, Recharge_Amount and
    Service_Provider from the module-level globals set by the input loop.
    """
    with open('RECHARGE.csv','a+',newline='') as record_file:
        csv.writer(record_file, lineterminator='\n').writerow(
            [Customer_Mobile, Recharge_Date, Recharge_Amount, Service_Provider])
def collection():
    """Append a formatted table row for the current recharge to collect.txt.

    Reads the recharge details from the module-level globals set by the
    input loop. The `with` statement closes the file automatically (the
    original's explicit pfile.close() inside the with block was redundant).
    """
    row = ('| {:^15s} | {:^15s} | {:^16s} | {:^15s} |'.format(
        Recharge_Date, Customer_Mobile, Service_Provider, Recharge_Amount) + "\n")
    with open('collect.txt', 'a+') as pfile:
        pfile.write(row)
def printbill():
    # Print every recorded recharge from collect.txt under a shop banner.
    with open('collect.txt','r') as rfile:
        cont=rfile.readlines()
    print("-"*86)
    print('{:^80s}'.format("AYUSH CELLPHONE LTD."))
    print('{:^80s}'.format("BHILAI"))
    print("-"*86)
    # NOTE(review): 'cumtomer' is a typo in a user-visible string; left
    # untouched here because it is runtime output, not a comment.
    print(" "+'| {:^15s} | {:^15s} | {:^16s} | {:^15s} |'.format('recharge date','cumtomer mobile','service provider','recharge amount'))
    print("-"*86)
    for content in cont:
        # NOTE(review): collection() never writes '#', so this split yields a
        # one-element list and each row prints as a Python list — confirm
        # whether '#' was the intended field separator.
        content=content.rstrip('\n').split('#')
        print(content)
        print('-'*86)
# Interactive entry loop: keep collecting recharge records until the user
# answers anything other than "yes", validating the amount against the
# available pack denominations, then print the collected bill.
add_recharge="yes"
packs=["10","20","50","100","200"]
while add_recharge=="yes":
    add_recharge=input("Do you want to enter one more recharge info. (yes/no):")
    if add_recharge=="yes":
        Customer_Mobile=input("Enter mobile number:")
        Recharge_Date=input("Enter date(dd/mm/yy):")
        Recharge_Amount=input("Enter the recharge amount:")
        # Re-prompt until the amount matches one of the available packs.
        while Recharge_Amount not in packs:
            print("there is no pack available in this amount")
            Recharge_Amount=input("Please anter a valid recharge amount:")
        Service_Provider=input("Enter you service provider name:")
        enter()
        collection()
    else:
        print("your recharge details have been noted.")
        break
printbill()
|
[
"noreply@github.com"
] |
noreply@github.com
|
b0a2b547e14de33c18ce4191d29376ac54166335
|
7d07a4453b6faad6cbc24d44caaa3ad1ab6ebe7f
|
/src/stc/gen_iface.py
|
8b43990623acd25d996466702a54e0b1819c0a57
|
[] |
no_license
|
rickyzhang82/wxpython-src-2.9.4.0
|
5a7fff6156fbf9ec1f372a3c6afa860c59bf8ea8
|
c9269e81638ccb74ae5086557567592aaa2aa695
|
refs/heads/master
| 2020-05-24T12:12:13.805532
| 2019-05-17T17:34:34
| 2019-05-17T17:34:34
| 187,259,114
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 32,001
|
py
|
#!/usr/bin/env python
#----------------------------------------------------------------------------
# Name: gen_iface.py
# Purpose: Generate stc.h and stc.cpp from the info in Scintilla.iface
#
# Author: Robin Dunn
#
# Created: 5-Sept-2000
# RCS-ID: $Id: gen_iface.py 71556 2012-05-25 09:48:12Z VZ $
# Copyright: (c) 2000 by Total Control Software
# Licence: wxWindows licence
#----------------------------------------------------------------------------
import sys, string, re, os
from fileinput import FileInput
# Input interface file and code templates, resolved relative to the cwd.
IFACE = os.path.abspath('./scintilla/include/Scintilla.iface')
H_TEMPLATE = os.path.abspath('./stc.h.in')
CPP_TEMPLATE = os.path.abspath('./stc.cpp.in')
# Generated output locations within the wxWidgets tree.
H_DEST = os.path.abspath('../../include/wx/stc/stc.h')
CPP_DEST = os.path.abspath('./stc.cpp')
# Only emit the generated docstring .i file when building for wxPython;
# otherwise discard it (note: '/dev/null' assumes a POSIX host).
if len(sys.argv) > 1 and sys.argv[1] == '--wxpython':
    DOCSTR_DEST = os.path.abspath('../../../wxPython/src/_stc_gendocs.i')
else:
    DOCSTR_DEST = '/dev/null'
# Value prefixes to convert
valPrefixes = [('SCI_', ''),
('SC_', ''),
('SCN_', None), # just toss these out...
('SCEN_', None),
('SC_EFF', None),
('SCE_', ''),
('SCLEX_', 'LEX_'),
('SCK_', 'KEY_'),
('SCFIND_', 'FIND_'),
('SCWS_', 'WS_'),
]
# Message function values that should have a CMD_ constant generated
cmdValues = [ 2011,
2013,
(2176, 2180),
(2300, 2349),
(2390, 2393),
(2395, 2396),
2404,
(2413, 2416),
(2426, 2442),
(2450, 2455),
2518,
]
# Should a funciton be also generated for the CMDs?
FUNC_FOR_CMD = 1
# Map some generic typenames to wx types, using return value syntax
retTypeMap = {
'position': 'int',
'string': 'wxString',
'colour': 'wxColour',
}
# Map some generic typenames to wx types, using parameter syntax
paramTypeMap = {
'position': 'int',
'string': 'const wxString&',
'colour': 'const wxColour&',
'keymod': 'int',
}
# Map of method info that needs tweaked. Either the name needs changed, or
# the method definition/implementation. Tuple items are:
#
# 1. New method name. None to skip the method, 0 to leave the
# default name.
# 2. Method definition for the .h file, 0 to leave alone
# 3. Method implementation for the .cpp file, 0 to leave alone.
# 4. tuple of Doc string lines, or 0 to leave alone.
#
methodOverrideMap = {
'AddText' : (0,
'void %s(const wxString& text);',
'''void %s(const wxString& text) {
const wxWX2MBbuf buf = wx2stc(text);
SendMsg(%s, wx2stclen(text, buf), (sptr_t)(const char*)buf);''',
0),
'AddStyledText' : (0,
'void %s(const wxMemoryBuffer& data);',
'''void %s(const wxMemoryBuffer& data) {
SendMsg(%s, data.GetDataLen(), (sptr_t)data.GetData());''',
0),
'AppendText' : (0,
'void %s(const wxString& text);',
'''void %s(const wxString& text) {
const wxWX2MBbuf buf = wx2stc(text);
SendMsg(%s, wx2stclen(text, buf), (sptr_t)(const char*)buf);''',
0),
'GetViewWS' : ( 'GetViewWhiteSpace', 0, 0, 0),
'SetViewWS' : ( 'SetViewWhiteSpace', 0, 0, 0),
'GetCharAt' :
( 0, 0,
'''int %s(int pos) const {
return (unsigned char)SendMsg(%s, pos, 0);''',
0),
'GetStyleAt' :
( 0, 0,
'''int %s(int pos) const {
return (unsigned char)SendMsg(%s, pos, 0);''',
0),
'GetStyledText' :
(0,
'wxMemoryBuffer %s(int startPos, int endPos);',
'''wxMemoryBuffer %s(int startPos, int endPos) {
wxMemoryBuffer buf;
if (endPos < startPos) {
int temp = startPos;
startPos = endPos;
endPos = temp;
}
int len = endPos - startPos;
if (!len) return buf;
TextRange tr;
tr.lpstrText = (char*)buf.GetWriteBuf(len*2+1);
tr.chrg.cpMin = startPos;
tr.chrg.cpMax = endPos;
len = SendMsg(%s, 0, (sptr_t)&tr);
buf.UngetWriteBuf(len);
return buf;''',
('Retrieve a buffer of cells.',)),
'PositionFromPoint' :
(0,
'int %s(wxPoint pt) const;',
'''int %s(wxPoint pt) const {
return SendMsg(%s, pt.x, pt.y);''',
0),
'GetCurLine' :
(0,
'#ifdef SWIG\n wxString %s(int* OUTPUT);\n#else\n wxString GetCurLine(int* linePos=NULL);\n#endif',
'''wxString %s(int* linePos) {
int len = LineLength(GetCurrentLine());
if (!len) {
if (linePos) *linePos = 0;
return wxEmptyString;
}
wxMemoryBuffer mbuf(len+1);
char* buf = (char*)mbuf.GetWriteBuf(len+1);
int pos = SendMsg(%s, len+1, (sptr_t)buf);
mbuf.UngetWriteBuf(len);
mbuf.AppendByte(0);
if (linePos) *linePos = pos;
return stc2wx(buf);''',
0),
'SetUsePalette' : (None, 0,0,0),
'MarkerSetFore' : ('MarkerSetForeground', 0, 0, 0),
'MarkerSetBack' : ('MarkerSetBackground', 0, 0, 0),
'MarkerSymbolDefined' : ('GetMarkerSymbolDefined', 0, 0, 0),
'MarkerDefine' :
(0,
'''void %s(int markerNumber, int markerSymbol,
const wxColour& foreground = wxNullColour,
const wxColour& background = wxNullColour);''',
'''void %s(int markerNumber, int markerSymbol,
const wxColour& foreground,
const wxColour& background) {
SendMsg(%s, markerNumber, markerSymbol);
if (foreground.IsOk())
MarkerSetForeground(markerNumber, foreground);
if (background.IsOk())
MarkerSetBackground(markerNumber, background);''',
('Set the symbol used for a particular marker number,',
'and optionally the fore and background colours.')),
'MarkerDefinePixmap' :
('MarkerDefineBitmap',
'''void %s(int markerNumber, const wxBitmap& bmp);''',
'''void %s(int markerNumber, const wxBitmap& bmp) {
// convert bmp to a xpm in a string
wxMemoryOutputStream strm;
wxImage img = bmp.ConvertToImage();
if (img.HasAlpha())
img.ConvertAlphaToMask();
img.SaveFile(strm, wxBITMAP_TYPE_XPM);
size_t len = strm.GetSize();
char* buff = new char[len+1];
strm.CopyTo(buff, len);
buff[len] = 0;
SendMsg(%s, markerNumber, (sptr_t)buff);
delete [] buff;
''',
('Define a marker from a bitmap',)),
'SetMarginTypeN' : ('SetMarginType', 0, 0, 0),
'GetMarginTypeN' : ('GetMarginType', 0, 0, 0),
'SetMarginWidthN' : ('SetMarginWidth', 0, 0, 0),
'GetMarginWidthN' : ('GetMarginWidth', 0, 0, 0),
'SetMarginMaskN' : ('SetMarginMask', 0, 0, 0),
'GetMarginMaskN' : ('GetMarginMask', 0, 0, 0),
'SetMarginSensitiveN' : ('SetMarginSensitive', 0, 0, 0),
'GetMarginSensitiveN' : ('GetMarginSensitive', 0, 0, 0),
'MarginGetText' :
(0,
'wxString %s(int line) const;',
'''wxString %s(int line) const {
long msg = %s;
long len = SendMsg(msg, line, 0);
wxMemoryBuffer mbuf(len+1);
char* buf = (char*)mbuf.GetWriteBuf(len+1);
SendMsg(msg, line, (sptr_t)buf);
mbuf.UngetWriteBuf(len);
mbuf.AppendByte(0);
return stc2wx(buf);''',
0),
'MarginGetStyles' :
(0,
'wxString %s(int line) const;',
'''wxString %s(int line) const {
long msg = %s;
long len = SendMsg(msg, line, 0);
wxMemoryBuffer mbuf(len+1);
char* buf = (char*)mbuf.GetWriteBuf(len+1);
SendMsg(msg, line, (sptr_t)buf);
mbuf.UngetWriteBuf(len);
mbuf.AppendByte(0);
return stc2wx(buf);''',
0),
'SetAdditionalSelFore' : ('SetAdditionalSelForeground', 0, 0, 0),
'SetAdditionalSelBack' : ('SetAdditionalSelBackground', 0, 0, 0),
'SetAdditionalCaretFore' : ('SetAdditionalCaretForeground', 0, 0, 0),
'GetAdditionalCaretFore' : ('GetAdditionalCaretForeground', 0, 0, 0),
'AnnotationGetText' :
(0,
'wxString %s(int line) const;',
'''wxString %s(int line) const {
long msg = %s;
long len = SendMsg(msg, line, 0);
wxMemoryBuffer mbuf(len+1);
char* buf = (char*)mbuf.GetWriteBuf(len+1);
SendMsg(msg, line, (sptr_t)buf);
mbuf.UngetWriteBuf(len);
mbuf.AppendByte(0);
return stc2wx(buf);''',
0),
'AnnotationGetStyles' :
(0,
'wxString %s(int line) const;',
'''wxString %s(int line) const {
long msg = %s;
long len = SendMsg(msg, line, 0);
wxMemoryBuffer mbuf(len+1);
char* buf = (char*)mbuf.GetWriteBuf(len+1);
SendMsg(msg, line, (sptr_t)buf);
mbuf.UngetWriteBuf(len);
mbuf.AppendByte(0);
return stc2wx(buf);''',
0),
'StyleGetFore' : ('StyleGetForeground', 0, 0, 0),
'StyleGetBack' : ('StyleGetBackground', 0, 0, 0),
'StyleSetFore' : ('StyleSetForeground', 0, 0, 0),
'StyleSetBack' : ('StyleSetBackground', 0, 0, 0),
'SetSelFore' : ('SetSelForeground', 0, 0, 0),
'SetSelBack' : ('SetSelBackground', 0, 0, 0),
'SetCaretFore' : ('SetCaretForeground', 0, 0, 0),
'StyleGetFont' :
('StyleGetFaceName',
'wxString %s(int style);',
'''wxString %s(int style) {
long msg = %s;
long len = SendMsg(msg, style, 0);
wxMemoryBuffer mbuf(len+1);
char* buf = (char*)mbuf.GetWriteBuf(len+1);
SendMsg(msg, style, (sptr_t)buf);
mbuf.UngetWriteBuf(len);
mbuf.AppendByte(0);
return stc2wx(buf);''',
('Get the font facename of a style',)),
'StyleSetFont' : ('StyleSetFaceName', 0, 0, 0),
'StyleSetCharacterSet' : (None, 0, 0, 0),
'AssignCmdKey' :
('CmdKeyAssign',
'void %s(int key, int modifiers, int cmd);',
'''void %s(int key, int modifiers, int cmd) {
SendMsg(%s, MAKELONG(key, modifiers), cmd);''',
0),
'ClearCmdKey' :
('CmdKeyClear',
'void %s(int key, int modifiers);',
'''void %s(int key, int modifiers) {
SendMsg(%s, MAKELONG(key, modifiers));''',
0),
'ClearAllCmdKeys' : ('CmdKeyClearAll', 0, 0, 0),
'SetStylingEx' :
('SetStyleBytes',
'void %s(int length, char* styleBytes);',
'''void %s(int length, char* styleBytes) {
SendMsg(%s, length, (sptr_t)styleBytes);''',
0),
'IndicSetAlpha' : ('IndicatorSetAlpha', 0, 0, 0),
'IndicGetAlpha' : ('IndicatorGetAlpha', 0, 0, 0),
'IndicSetStyle' : ('IndicatorSetStyle', 0, 0, 0),
'IndicGetStyle' : ('IndicatorGetStyle', 0, 0, 0),
'IndicSetFore' : ('IndicatorSetForeground', 0, 0, 0),
'IndicGetFore' : ('IndicatorGetForeground', 0, 0, 0),
'IndicSetUnder': ('IndicatorSetUnder', 0, 0, 0),
'IndicGetUnder': ('IndicatorGetUnder', 0, 0, 0),
'SetWhitespaceFore' : ('SetWhitespaceForeground', 0, 0, 0),
'SetWhitespaceBack' : ('SetWhitespaceBackground', 0, 0, 0),
'AutoCShow' : ('AutoCompShow', 0, 0, 0),
'AutoCCancel' : ('AutoCompCancel', 0, 0, 0),
'AutoCActive' : ('AutoCompActive', 0, 0, 0),
'AutoCPosStart' : ('AutoCompPosStart', 0, 0, 0),
'AutoCComplete' : ('AutoCompComplete', 0, 0, 0),
'AutoCStops' : ('AutoCompStops', 0, 0, 0),
'AutoCSetSeparator' : ('AutoCompSetSeparator', 0, 0, 0),
'AutoCGetSeparator' : ('AutoCompGetSeparator', 0, 0, 0),
'AutoCSelect' : ('AutoCompSelect', 0, 0, 0),
'AutoCSetCancelAtStart' : ('AutoCompSetCancelAtStart', 0, 0, 0),
'AutoCGetCancelAtStart' : ('AutoCompGetCancelAtStart', 0, 0, 0),
'AutoCSetFillUps' : ('AutoCompSetFillUps', 0, 0, 0),
'AutoCSetChooseSingle' : ('AutoCompSetChooseSingle', 0, 0, 0),
'AutoCGetChooseSingle' : ('AutoCompGetChooseSingle', 0, 0, 0),
'AutoCSetIgnoreCase' : ('AutoCompSetIgnoreCase', 0, 0, 0),
'AutoCGetIgnoreCase' : ('AutoCompGetIgnoreCase', 0, 0, 0),
'AutoCSetAutoHide' : ('AutoCompSetAutoHide', 0, 0, 0),
'AutoCGetAutoHide' : ('AutoCompGetAutoHide', 0, 0, 0),
'AutoCSetDropRestOfWord' : ('AutoCompSetDropRestOfWord', 0,0,0),
'AutoCGetDropRestOfWord' : ('AutoCompGetDropRestOfWord', 0,0,0),
'AutoCGetTypeSeparator' : ('AutoCompGetTypeSeparator', 0, 0, 0),
'AutoCSetTypeSeparator' : ('AutoCompSetTypeSeparator', 0, 0, 0),
'AutoCGetCurrent' : ('AutoCompGetCurrent', 0, 0, 0),
'AutoCGetCurrentText' : (None, 0, 0, 0),
'AutoCSetMaxWidth' : ('AutoCompSetMaxWidth', 0, 0, 0),
'AutoCGetMaxWidth' : ('AutoCompGetMaxWidth', 0, 0, 0),
'AutoCSetMaxHeight' : ('AutoCompSetMaxHeight', 0, 0, 0),
'AutoCGetMaxHeight' : ('AutoCompGetMaxHeight', 0, 0, 0),
'AutoCGetMaxHeight' : ('AutoCompGetMaxHeight', 0, 0, 0),
'RegisterImage' :
(0,
'''void %s(int type, const wxBitmap& bmp);''',
'''void %s(int type, const wxBitmap& bmp) {
// convert bmp to a xpm in a string
wxMemoryOutputStream strm;
wxImage img = bmp.ConvertToImage();
if (img.HasAlpha())
img.ConvertAlphaToMask();
img.SaveFile(strm, wxBITMAP_TYPE_XPM);
size_t len = strm.GetSize();
char* buff = new char[len+1];
strm.CopyTo(buff, len);
buff[len] = 0;
SendMsg(%s, type, (sptr_t)buff);
delete [] buff;
''',
('Register an image for use in autocompletion lists.',)),
'ClearRegisteredImages' : (0, 0, 0,
('Clear all the registered images.',)),
'SetHScrollBar' : ('SetUseHorizontalScrollBar', 0, 0, 0),
'GetHScrollBar' : ('GetUseHorizontalScrollBar', 0, 0, 0),
'SetVScrollBar' : ('SetUseVerticalScrollBar', 0, 0, 0),
'GetVScrollBar' : ('GetUseVerticalScrollBar', 0, 0, 0),
'GetCaretFore' : ('GetCaretForeground', 0, 0, 0),
'GetUsePalette' : (None, 0, 0, 0),
'FindText' :
(0,
'''int %s(int minPos, int maxPos, const wxString& text, int flags=0);''',
'''int %s(int minPos, int maxPos,
const wxString& text,
int flags) {
TextToFind ft;
ft.chrg.cpMin = minPos;
ft.chrg.cpMax = maxPos;
const wxWX2MBbuf buf = wx2stc(text);
ft.lpstrText = (char*)(const char*)buf;
return SendMsg(%s, flags, (sptr_t)&ft);''',
0),
'FormatRange' :
(0,
'''int %s(bool doDraw,
int startPos,
int endPos,
wxDC* draw,
wxDC* target,
wxRect renderRect,
wxRect pageRect);''',
''' int %s(bool doDraw,
int startPos,
int endPos,
wxDC* draw,
wxDC* target,
wxRect renderRect,
wxRect pageRect) {
RangeToFormat fr;
if (endPos < startPos) {
int temp = startPos;
startPos = endPos;
endPos = temp;
}
fr.hdc = draw;
fr.hdcTarget = target;
fr.rc.top = renderRect.GetTop();
fr.rc.left = renderRect.GetLeft();
fr.rc.right = renderRect.GetRight();
fr.rc.bottom = renderRect.GetBottom();
fr.rcPage.top = pageRect.GetTop();
fr.rcPage.left = pageRect.GetLeft();
fr.rcPage.right = pageRect.GetRight();
fr.rcPage.bottom = pageRect.GetBottom();
fr.chrg.cpMin = startPos;
fr.chrg.cpMax = endPos;
return SendMsg(%s, doDraw, (sptr_t)&fr);''',
0),
'GetLine' :
(0,
'wxString %s(int line) const;',
'''wxString %s(int line) const {
int len = LineLength(line);
if (!len) return wxEmptyString;
wxMemoryBuffer mbuf(len+1);
char* buf = (char*)mbuf.GetWriteBuf(len+1);
SendMsg(%s, line, (sptr_t)buf);
mbuf.UngetWriteBuf(len);
mbuf.AppendByte(0);
return stc2wx(buf);''',
('Retrieve the contents of a line.',)),
'SetSel' : (None, 0,0,0), #'SetSelection', 0, 0, 0),
'GetSelText' :
('GetSelectedText',
'wxString %s();',
'''wxString %s() {
const int len = SendMsg(SCI_GETSELTEXT, 0, (sptr_t)0);
if (!len) return wxEmptyString;
wxMemoryBuffer mbuf(len+2);
char* buf = (char*)mbuf.GetWriteBuf(len+1);
SendMsg(%s, 0, (sptr_t)buf);
mbuf.UngetWriteBuf(len);
mbuf.AppendByte(0);
return stc2wx(buf);''',
('Retrieve the selected text.',)),
'GetTextRange' :
(0,
'wxString %s(int startPos, int endPos);',
'''wxString %s(int startPos, int endPos) {
if (endPos < startPos) {
int temp = startPos;
startPos = endPos;
endPos = temp;
}
int len = endPos - startPos;
if (!len) return wxEmptyString;
wxMemoryBuffer mbuf(len+1);
char* buf = (char*)mbuf.GetWriteBuf(len);
TextRange tr;
tr.lpstrText = buf;
tr.chrg.cpMin = startPos;
tr.chrg.cpMax = endPos;
SendMsg(%s, 0, (sptr_t)&tr);
mbuf.UngetWriteBuf(len);
mbuf.AppendByte(0);
return stc2wx(buf);''',
('Retrieve a range of text.',)),
'PointXFromPosition' : (None, 0, 0, 0),
'PointYFromPosition' : (None, 0, 0, 0),
'ScrollCaret' : ('EnsureCaretVisible', 0, 0, 0),
'ReplaceSel' : ('ReplaceSelection', 0, 0, 0),
'Null' : (None, 0, 0, 0),
'GetText' :
(0,
'wxString %s() const;',
'''wxString %s() const {
int len = GetTextLength();
wxMemoryBuffer mbuf(len+1); // leave room for the null...
char* buf = (char*)mbuf.GetWriteBuf(len+1);
SendMsg(%s, len+1, (sptr_t)buf);
mbuf.UngetWriteBuf(len);
mbuf.AppendByte(0);
return stc2wx(buf);''',
('Retrieve all the text in the document.', )),
'GetDirectFunction' : (None, 0, 0, 0),
'GetDirectPointer' : (None, 0, 0, 0),
'CallTipPosStart' : ('CallTipPosAtStart', 0, 0, 0),
'CallTipSetHlt' : ('CallTipSetHighlight', 0, 0, 0),
'CallTipSetBack' : ('CallTipSetBackground', 0, 0, 0),
'CallTipSetFore' : ('CallTipSetForeground', 0, 0, 0),
'CallTipSetForeHlt' : ('CallTipSetForegroundHighlight', 0, 0, 0),
'SetHotspotActiveFore' : ('SetHotspotActiveForeground', 0, 0, 0),
'SetHotspotActiveBack' : ('SetHotspotActiveBackground', 0, 0, 0),
'GetHotspotActiveFore' : ('GetHotspotActiveForeground', 0, 0, 0),
'GetHotspotActiveBack' : ('GetHotspotActiveBackground', 0, 0, 0),
'GetCaretLineBack' : ('GetCaretLineBackground', 0, 0, 0),
'SetCaretLineBack' : ('SetCaretLineBackground', 0, 0, 0),
'ReplaceTarget' :
(0,
'int %s(const wxString& text);',
'''
int %s(const wxString& text) {
const wxWX2MBbuf buf = wx2stc(text);
return SendMsg(%s, wx2stclen(text, buf), (sptr_t)(const char*)buf);''',
0),
'ReplaceTargetRE' :
(0,
'int %s(const wxString& text);',
'''
int %s(const wxString& text) {
const wxWX2MBbuf buf = wx2stc(text);
return SendMsg(%s, wx2stclen(text, buf), (sptr_t)(const char*)buf);''',
0),
'SearchInTarget' :
(0,
'int %s(const wxString& text);',
'''
int %s(const wxString& text) {
const wxWX2MBbuf buf = wx2stc(text);
return SendMsg(%s, wx2stclen(text, buf), (sptr_t)(const char*)buf);''',
0),
# not sure what to do about these yet
'TargetAsUTF8' : ( None, 0, 0, 0),
'SetLengthForEncode' : ( None, 0, 0, 0),
'EncodedFromUTF8' : ( None, 0, 0, 0),
'GetProperty' :
(0,
'wxString %s(const wxString& key);',
'''wxString %s(const wxString& key) {
int len = SendMsg(SCI_GETPROPERTY, (sptr_t)(const char*)wx2stc(key), 0);
if (!len) return wxEmptyString;
wxMemoryBuffer mbuf(len+1);
char* buf = (char*)mbuf.GetWriteBuf(len+1);
SendMsg(%s, (uptr_t)(const char*)wx2stc(key), (sptr_t)buf);
mbuf.UngetWriteBuf(len);
mbuf.AppendByte(0);
return stc2wx(buf);''',
("Retrieve a 'property' value previously set with SetProperty.",)),
'GetPropertyExpanded' :
(0,
'wxString %s(const wxString& key);',
'''wxString %s(const wxString& key) {
int len = SendMsg(SCI_GETPROPERTYEXPANDED, (uptr_t)(const char*)wx2stc(key), 0);
if (!len) return wxEmptyString;
wxMemoryBuffer mbuf(len+1);
char* buf = (char*)mbuf.GetWriteBuf(len+1);
SendMsg(%s, (uptr_t)(const char*)wx2stc(key), (sptr_t)buf);
mbuf.UngetWriteBuf(len);
mbuf.AppendByte(0);
return stc2wx(buf);''',
("Retrieve a 'property' value previously set with SetProperty,",
"with '$()' variable replacement on returned buffer.")),
'GetPropertyInt' : (0, 0, 0,
("Retrieve a 'property' value previously set with SetProperty,",
"interpreted as an int AFTER any '$()' variable replacement.")),
'GetDocPointer' :
(0,
'void* %s();',
'''void* %s() {
return (void*)SendMsg(%s);''',
0),
'SetDocPointer' :
(0,
'void %s(void* docPointer);',
'''void %s(void* docPointer) {
SendMsg(%s, 0, (sptr_t)docPointer);''',
0),
'CreateDocument' :
(0,
'void* %s();',
'''void* %s() {
return (void*)SendMsg(%s);''',
0),
'AddRefDocument' :
(0,
'void %s(void* docPointer);',
'''void %s(void* docPointer) {
SendMsg(%s, 0, (sptr_t)docPointer);''',
0),
'ReleaseDocument' :
(0,
'void %s(void* docPointer);',
'''void %s(void* docPointer) {
SendMsg(%s, 0, (sptr_t)docPointer);''',
0),
'SetCodePage' :
(0,
0,
'''void %s(int codePage) {
#if wxUSE_UNICODE
wxASSERT_MSG(codePage == wxSTC_CP_UTF8,
wxT("Only wxSTC_CP_UTF8 may be used when wxUSE_UNICODE is on."));
#else
wxASSERT_MSG(codePage != wxSTC_CP_UTF8,
wxT("wxSTC_CP_UTF8 may not be used when wxUSE_UNICODE is off."));
#endif
SendMsg(%s, codePage);''',
("Set the code page used to interpret the bytes of the document as characters.",) ),
'GrabFocus' : (None, 0, 0, 0),
# Rename some that would otherwise hide the wxWindow methods
'SetFocus' : ('SetSTCFocus', 0, 0, 0),
'GetFocus' : ('GetSTCFocus', 0, 0, 0),
'SetCursor' : ('SetSTCCursor', 0, 0, 0),
'GetCursor' : ('GetSTCCursor', 0, 0, 0),
'LoadLexerLibrary' : (None, 0,0,0),
'SetPositionCache' : ('SetPositionCacheSize', 0, 0, 0),
'GetPositionCache' : ('GetPositionCacheSize', 0, 0, 0),
'GetLexerLanguage' : (None, 0, 0, 0),
'SetFontQuality' : (None, 0, 0, 0),
'GetFontQuality' : (None, 0, 0, 0),
'SetSelection' : (None, 0, 0, 0),
'GetCharacterPointer' : (0,
'const char* %s();',
'const char* %s() {\n'
' return (const char*)SendMsg(%s, 0, 0);',
0),
'' : ('', 0, 0, 0),
}
# all Scintilla getters are transformed into const member of wxSTC class but
# some non-getter methods are also logically const and this set contains their
# names (notice that it's useless to include here methods manually overridden
# above)
# Non-getter Scintilla methods that are nevertheless logically const and
# should be generated as const members of wxSTC (methods overridden
# manually in methodOverrideMap are handled there and need not appear here).
constNonGetterMethods = (
    'LineFromPosition',
    'PositionFromLine',
    'LineLength',
    'CanPaste',
    'CanRedo',
    'CanUndo',
)
#----------------------------------------------------------------------------
def processIface(iface, h_tmplt, cpp_tmplt, h_dest, cpp_dest, docstr_dest):
    """Parse the Scintilla .iface file and generate the wxSTC sources.

    Reads `iface`, collecting value definitions, methods and key-command
    values, then substitutes them into the .h/.cpp templates and writes
    the destination header, implementation and docstring files.
    """
    curDocStrings = []
    values = []
    methods = []
    cmds = []

    # parse iface file
    fi = FileInput(iface)
    for line in fi:
        line = line[:-1]
        if line[:2] == '##' or line == '':
            continue

        op = line[:4]
        if line[:2] == '# ':          # a doc string
            curDocStrings.append(line[2:])
        elif op == 'val ':
            parseVal(line[4:], values, curDocStrings)
            curDocStrings = []
        elif op == 'fun ' or op == 'set ' or op == 'get ':
            parseFun(line[4:], methods, curDocStrings, cmds, op == 'get ')
            curDocStrings = []
        elif op == 'cat ':
            if line[4:].strip() == 'Deprecated':
                break                 # skip the rest of the file
        elif op == 'evt ':
            pass
        elif op == 'enu ':
            pass
        elif op == 'lex ':
            pass
        else:
            print('***** Unknown line type: %s' % line)

    # process templates
    data = {}
    data['VALUES'] = processVals(values)
    data['CMDS'] = processVals(cmds)
    defs, imps, docstrings = processMethods(methods)
    data['METHOD_DEFS'] = defs
    data['METHOD_IMPS'] = imps

    # Read the template text and do the substitutions.  Context managers
    # close the handles deterministically instead of leaking them until GC.
    with open(h_tmplt) as f:
        h_text = f.read() % data
    with open(cpp_tmplt) as f:
        cpp_text = f.read() % data

    # write out destination files
    with open(h_dest, 'w') as f:
        f.write(h_text)
    with open(cpp_dest, 'w') as f:
        f.write(cpp_text)
    with open(docstr_dest, 'w') as f:
        f.write(docstrings)
def joinWithNewLines(values):
    """Return *values* concatenated with a newline between each item."""
    separator = '\n'
    return separator.join(values)
#----------------------------------------------------------------------------
def processVals(values):
    """Render (name, value, docs) triples as C '#define' lines.

    Each entry carrying doc strings is preceded by a blank line and its
    docs rendered as '//' comments.  Returns one newline-joined string.
    """
    lines = []
    for name, value, docs in values:
        if docs:
            lines.append('')
            lines.extend('// ' + doc for doc in docs)
        lines.append('#define %s %s' % (name, value))
    return '\n'.join(lines)
#----------------------------------------------------------------------------
def processMethods(methods):
    """Render parsed method records into .h declarations, .cpp
    implementations and DocStr entries.

    Returns a (defs, imps, docstrings) triple of newline-joined strings.
    """
    defs = []
    imps = []
    dstr = []

    for retType, name, number, param1, param2, docs, is_const in methods:
        # Map Scintilla types to their wx equivalents where one exists.
        retType = retTypeMap.get(retType, retType)
        params = makeParamString(param1, param2)

        # A manual override can rename the method, supply a hand-written
        # definition/implementation, replace the docs, or (name -> None)
        # suppress the method entirely.
        name, theDef, theImp, docs = checkMethodOverride(name, number, docs)
        if name is None:
            continue

        # Build docstrings
        st = 'DocStr(wxStyledTextCtrl::%s,\n' \
             '"%s", "");\n' % (name, joinWithNewLines(docs))
        dstr.append(st)

        # Build the method definition for the .h file
        if docs:
            defs.append('')
            for x in docs:
                defs.append(' // ' + x)
        if not theDef:
            # No hand-written declaration: synthesize one from the parts.
            theDef = ' %s %s(%s)' % (retType, name, params)
            if is_const:
                theDef = theDef + ' const'
            theDef = theDef + ';'
        defs.append(theDef)

        # Build the method implementation string
        if docs:
            imps.append('')
            for x in docs:
                imps.append('// ' + x)
        if not theImp:
            # Synthesize a SendMsg-based implementation.  wxColour returns
            # go through a long intermediate and wxColourFromLong; bool
            # returns compare the message result against 0.
            theImp = '%s wxStyledTextCtrl::%s(%s)' % (retType, name, params)
            if is_const:
                theImp = theImp + ' const'
            theImp = theImp + '\n{\n '
            if retType == 'wxColour':
                theImp = theImp + 'long c = '
            elif retType != 'void':
                theImp = theImp + 'return '
            theImp = theImp + 'SendMsg(%s, %s, %s)' % (number,
                                                       makeArgString(param1),
                                                       makeArgString(param2))
            if retType == 'bool':
                theImp = theImp + ' != 0'
            if retType == 'wxColour':
                theImp = theImp + ';\n return wxColourFromLong(c)'
            theImp = theImp + ';\n}'
        imps.append(theImp)

    return joinWithNewLines(defs), joinWithNewLines(imps), joinWithNewLines(dstr)
#----------------------------------------------------------------------------
def checkMethodOverride(name, number, docs):
    """Apply any manual override for *name* from methodOverrideMap.

    Returns (name, theDef, theImp, docs).  ``theDef``/``theImp`` are None
    when no hand-written declaration/implementation exists; ``name`` is
    None when the override says to skip this method entirely.
    """
    theDef = theImp = None
    # Single lookup instead of the `in` test followed by indexing.
    item = methodOverrideMap.get(name)
    if item is not None:
        try:
            if item[0] != 0:
                name = item[0]
            if item[1] != 0:
                theDef = ' ' + (item[1] % name)
            if item[2] != 0:
                theImp = item[2] % ('wxStyledTextCtrl::' + name, number) + '\n}'
            if item[3] != 0:
                docs = item[3]
        except Exception:
            # Identify the malformed override entry before propagating.
            print("************* " + name)
            raise

    return name, theDef, theImp, docs
#----------------------------------------------------------------------------
def makeArgString(param):
    """Render a (type, name) parameter tuple as a SendMsg argument string.

    A missing parameter becomes the literal '0'; string and colour types
    get the appropriate conversion wrapper around the name.
    """
    if not param:
        return '0'
    paramType, paramName = param
    if paramType == 'string':
        return '(sptr_t)(const char*)wx2stc(%s)' % paramName
    if paramType == 'colour':
        return 'wxColourAsLong(%s)' % paramName
    return paramName
#----------------------------------------------------------------------------
def makeParamString(param1, param2):
    """Build a C++ parameter-list string from up to two (type, name) tuples.

    Missing parameters render as the empty string; two present parameters
    are separated by ', '.  Types are mapped through paramTypeMap.
    """
    def render(param):
        if not param:
            return ''
        cppType = paramTypeMap.get(param[0], param[0])
        return '%s %s' % (cppType, param[1])

    result = render(param1)
    if result and param2:
        result += ', '
    return result + render(param2)
#----------------------------------------------------------------------------
def parseVal(line, values, docs):
    """Parse a 'name=value' iface line and append it to *values*.

    Known Scintilla prefixes are stripped or remapped via valPrefixes;
    a prefix mapped to None suppresses the value.  The resulting name is
    emitted with the 'wxSTC_' prefix attached.
    """
    # Split on the first '=' only, so values that themselves contain an
    # '=' character do not raise a ValueError from tuple unpacking.
    name, val = line.split('=', 1)

    # remove prefixes such as SCI, etc.
    for old, new in valPrefixes:
        lo = len(old)
        if name[:lo] == old:
            if new is None:
                return
            name = new + name[lo:]

    # add it to the list
    values.append(('wxSTC_' + name, val, docs))
#----------------------------------------------------------------------------
# Matches one 'fun'/'get'/'set' iface definition of the form:
#   <ws>returnType<ws>name=number(param1,param2,)
# All pattern pieces are raw strings so '\s' and '\(' are real regex
# escapes rather than invalid Python string escapes (which raise a
# warning on modern interpreters).
funregex = re.compile(r'\s*([a-zA-Z0-9_]+)'   # <ws>return type
                      r'\s+([a-zA-Z0-9_]+)='  # <ws>name=
                      r'([0-9]+)'             # number
                      r'\(([ a-zA-Z0-9_]*),'  # (param,
                      r'([ a-zA-Z0-9_]*),*\)')  # param)
def parseFun(line, methods, docs, values, is_const):
    """Parse one 'fun/get/set' definition line into *methods*.

    Functions whose number falls into cmdValues also get a wxSTC_CMD_*
    value appended to *values*; whether a method wrapper is still
    generated for those depends on FUNC_FOR_CMD.
    """
    def parseParam(param):
        param = param.strip()
        if param == '':
            return None
        return tuple(param.split())

    mo = funregex.match(line)
    if mo is None:
        # Report and skip the malformed line.  Previously the code fell
        # through and crashed with an AttributeError on mo.groups().
        print("***** Line doesn't match! : %s" % line)
        return

    retType, name, number, param1, param2 = mo.groups()
    param1 = parseParam(param1)
    param2 = parseParam(param2)

    # Special case. For the key command functions we want a value defined too
    num = int(number)
    for v in cmdValues:
        if (type(v) == type(()) and v[0] <= num <= v[1]) or v == num:
            parseVal('CMD_%s=%s' % (name.upper(), number), values, docs)

            # if we are not also doing a function for CMD values, then
            # just return, otherwise fall through to the append below.
            if not FUNC_FOR_CMD:
                return

    methods.append((retType, name, number, param1, param2, tuple(docs),
                    is_const or name in constNonGetterMethods))
#----------------------------------------------------------------------------
def main(args):
    """Entry point: regenerate the wxSTC sources from the iface file."""
    # TODO: parse command line args to replace default input/output files???
    if os.path.exists(IFACE):
        # Now just do it
        processIface(IFACE, H_TEMPLATE, CPP_TEMPLATE, H_DEST, CPP_DEST, DOCSTR_DEST)
    else:
        print('Please run this script from src/stc subdirectory.')
        sys.exit(1)


if __name__ == '__main__':
    main(sys.argv)
#----------------------------------------------------------------------------
|
[
"rickyzhang@gmail.com"
] |
rickyzhang@gmail.com
|
7be77a226991e8e5cd0cfa304d5c6e570a73c828
|
75eac06d5714843f1f4a1ead6d8a3164adcb9a61
|
/csqa/models/bert_sep.py
|
2f14af9e1c38b1fd04d1c54e957139e86b27b5be
|
[] |
no_license
|
Shuailong/csqa
|
0b3b8de0fc139d84c4841a948fff69a3d0855326
|
bc03dfbb1abe8fd37feee2870210f4209ad1d6af
|
refs/heads/master
| 2022-01-04T17:52:53.909954
| 2020-03-28T04:59:45
| 2020-03-28T04:59:45
| 181,131,710
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,157
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Author: Shuailong
# @Email: liangshuailong@gmail.com
# @Date: 2019-05-18 23:07:29
# @Last Modified by: Shuailong
# @Last Modified time: 2019-05-18 23:07:39
import logging
from typing import Any, Dict, List, Optional
from overrides import overrides
import torch
from allennlp.common.checks import check_dimensions_match
from allennlp.data import Vocabulary
from allennlp.models.model import Model
from allennlp.modules import TextFieldEmbedder, FeedForward
from allennlp.nn import InitializerApplicator, RegularizerApplicator
from allennlp.training.metrics import CategoricalAccuracy
from allennlp.modules.attention import DotProductAttention
from allennlp.nn.util import weighted_sum
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
@Model.register("csqa-bert-sep")
class CSQABertSep(Model):
    """
    This class implements a baseline BERT model for the CommonsenseQA dataset
    described in the NAACL 2019 paper CommonsenseQA: A Question Answering
    Challenge Targeting Commonsense Knowledge [https://arxiv.org/abs/1811.00937].
    Question and choices are encoded separately; each choice is scored from the
    concatenation of the pooled question and choice representations.

    Parameters
    ----------
    vocab : ``Vocabulary``
    bert : ``TextFieldEmbedder``
        Used to embed the ``question`` and ``choices`` ``TextFields``.
    classifier : ``FeedForward``
        Maps the concatenated [question; choice] vector to one score per choice;
        its input dim must therefore be twice the BERT output dim.
    dropout : ``float``, optional (default=0.1)
        If greater than 0, applied to the pooled question/choice vectors.
    """
    def __init__(self, vocab: Vocabulary,
                 bert: TextFieldEmbedder,
                 classifier: FeedForward,
                 dropout: float = 0.1,
                 initializer: InitializerApplicator = InitializerApplicator(),
                 regularizer: Optional[RegularizerApplicator] = None) -> None:
        super().__init__(vocab, regularizer)
        self._bert = bert
        self._classifier = classifier
        if dropout:
            self.dropout = torch.nn.Dropout(dropout)
        else:
            self.dropout = None
        # NOTE(review): _pooler is constructed but never used in forward();
        # kept so existing configs/checkpoints that expect it still work.
        self._pooler = FeedForward(input_dim=bert.get_output_dim(),
                                   num_layers=1,
                                   hidden_dims=bert.get_output_dim(),
                                   activations=torch.tanh)
        # The classifier consumes [question; choice] — twice the BERT dim.
        check_dimensions_match(bert.get_output_dim() * 2, classifier.get_input_dim(),
                               "bert embedding dim", "classifier input dim")
        self._accuracy = CategoricalAccuracy()
        self._loss = torch.nn.CrossEntropyLoss()
        initializer(self)

    def forward(self,  # type: ignore
                question: Dict[str, torch.LongTensor],
                choices: Dict[str, torch.LongTensor],
                evidence: Dict[str, torch.LongTensor],
                answer_index: torch.IntTensor = None,
                metadata: List[Dict[str, Any]
                               ] = None  # pylint:disable=unused-argument
                ) -> Dict[str, torch.Tensor]:
        # pylint: disable=arguments-differ
        """
        Score each of the 5 choices against the question.

        Parameters
        ----------
        question / choices : Dict[str, torch.LongTensor]
            Token tensors for the question and the list of 5 choices.
        evidence : Dict[str, torch.LongTensor]
            Accepted for dataset-reader compatibility; not used here.
        answer_index : ``torch.IntTensor``, optional
            Index of the correct choice; when given, a loss is computed.
        metadata : ``List[Dict[str, Any]]``, optional
            Per-instance metadata; only the ``qid`` key is read.

        Returns
        -------
        Dict with ``qid``, ``answer_logits`` (batch, 5), ``answer_probs``
        (batch, 5) and, when ``answer_index`` is given, ``loss``.
        """
        # batch, seq_len -> batch, seq_len, emb
        question_hidden = self._bert(question)
        batch_size, emb_size = question_hidden.size(0), question_hidden.size(2)
        # Pool by taking the first ([CLS]) token's representation.
        question_hidden = question_hidden[..., 0, :]  # batch, emb
        # batch, 5, seq_len -> batch, 5, seq_len, emb
        choice_hidden = self._bert(choices, num_wrapping_dims=1)
        choice_hidden = choice_hidden[..., 0, :]  # batch, 5, emb
        if self.dropout:
            question_hidden = self.dropout(question_hidden)
            choice_hidden = self.dropout(choice_hidden)
        # Broadcast the question vector against the 5 choices.
        question_hidden = question_hidden.unsqueeze(
            1).expand(batch_size, 5, emb_size)
        cls_hidden = torch.cat([question_hidden, choice_hidden],
                               dim=-1)  # batch, 5, emb*2
        # The final MLP -- dropout was applied to the inputs above.
        answer_logits = self._classifier(cls_hidden).squeeze(-1)  # batch, 5
        answer_probs = torch.nn.functional.softmax(answer_logits, dim=-1)
        # Fix: ``metadata`` is documented as optional (defaults to None), so
        # guard instead of crashing on iteration when it is absent.
        qids = [m['qid'] for m in metadata] if metadata is not None else []
        output_dict = {"answer_logits": answer_logits,
                       "answer_probs": answer_probs,
                       "qid": qids}
        if answer_index is not None:
            answer_index = answer_index.squeeze(-1)  # batch
            loss = self._loss(answer_logits, answer_index)
            self._accuracy(answer_logits, answer_index)
            output_dict["loss"] = loss
        return output_dict

    @overrides
    def get_metrics(self, reset: bool = False) -> Dict[str, float]:
        return {'accuracy': self._accuracy.get_metric(reset)}
|
[
"liangshuailong@gmail.com"
] |
liangshuailong@gmail.com
|
f9757cd5f5931e24e90a9be34c09ca15d7bdbedd
|
f0adca7cac7fb12cdb89e7e821559fe2603bf4bc
|
/src/234/recipe_234_02.py
|
029ab82d6382993f4d8564ed733634fc696da9c6
|
[] |
no_license
|
eriamavro/python-recipe-src
|
dccfa06bc56fcc713f8da9e466f04d07c1f961f0
|
d14f3e4cd885515e9a9a7b8e3f064609c8e50fad
|
refs/heads/master
| 2023-02-13T02:08:44.531621
| 2021-01-14T12:03:05
| 2021-01-14T12:03:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 171
|
py
|
import requests

# Pass the dict directly via the ``json=`` keyword: requests serializes it
# and sets the Content-Type header.  The previous ``json=json.dumps(payload)``
# double-encoded the payload, POSTing a JSON *string literal* instead of a
# JSON object.
payload = {'key1': 'value1', 'key2': 'value2'}

url = "http://httpbin.org/post"
r = requests.post(url, json=payload)
print(r.text)
|
[
"kurozumi.ta@gmail.com"
] |
kurozumi.ta@gmail.com
|
52cf938030ca7e71c4454b452c5e3b75018fc9a7
|
cb6c37c49cc6caef094160ca132f80807e6db933
|
/flexiretail_ee_advance/models/purchase_order.py
|
b0854f4223e47c7a36f9119db6f0963f0fa24c4d
|
[] |
no_license
|
Alimentosecocare/orange
|
07cb481c42fdc3e151d884255092842eeabae298
|
f67f64314777c7bf395126e6f6d019c381f3ef4d
|
refs/heads/master
| 2022-04-22T17:23:02.499574
| 2020-04-23T16:04:52
| 2020-04-23T16:04:52
| 258,229,315
| 0
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,531
|
py
|
# -*- coding: utf-8 -*-
#################################################################################
# Author : Acespritech Solutions Pvt. Ltd. (<www.acespritech.com>)
# Copyright(c): 2012-Present Acespritech Solutions Pvt. Ltd.
# All Rights Reserved.
#
# This program is copyright property of the author mentioned above.
# You can`t redistribute it and/or modify it.
#
#################################################################################
from openerp import models, fields, api, _
from datetime import datetime
class PurchaseOrder(models.Model):
    # Extends the core Odoo purchase.order model with an RPC helper.
    _inherit = 'purchase.order'

    @api.model
    def create_po(self,vals):
        """Create a purchase order from a frontend payload.

        ``vals`` is expected to carry ``product_detail`` (a mapping of
        product id -> quantity), ``supplier_id`` and optionally
        ``send_mail`` ('on' to e-mail the order).  Returns
        ``[order_id, order_name]``.
        """
        order_lst = []
        for k, v in vals.get('product_detail').items():
            product_id = self.env['product.product'].browse(int(k))
            qty = int(v)
            # Prefer the supplier-specific price; fall back to the
            # product's standard cost when no supplierinfo record exists.
            price_unit = 0.0
            product_supplierinfo_id = self.env['product.supplierinfo'].search([('name', '=', vals.get('supplier_id')),
                            ('product_tmpl_id', '=', product_id.product_tmpl_id.id)], limit=1, order="id desc")
            if product_supplierinfo_id:
                price_unit = product_supplierinfo_id.price
            if not product_supplierinfo_id:
                price_unit = product_id.standard_price
            # (0, 0, {...}) is the Odoo one2many "create" command.
            order_lst.append((0, 0, {
                'date_planned': datetime.now(),
                'name': product_id.name,
                'price_unit': price_unit,
                'product_id': product_id.id,
                'product_qty': qty,
                'product_uom': product_id.uom_po_id.id or False,
            }))
        # NOTE(review): the same variable is reused for the model and then
        # rebound to the created record — intentional but easy to misread.
        purchase_order_obj = self.env['purchase.order']
        purchase_order_obj = purchase_order_obj.create({
            'partner_id': vals.get('supplier_id'),
            'date_order': datetime.now(),
        })
        purchase_order_obj.onchange_partner_id()
        purchase_order_obj.order_line = order_lst
        purchase_order_obj.order_line._compute_tax_id()
        if vals.get('send_mail') == 'on':
            ir_model_data = self.env['ir.model.data']
            try:
                if self.env.context.get('send_rfq', False):
                    template_id = ir_model_data.get_object_reference('purchase', 'email_template_edi_purchase')[1]
                else:
                    template_id = ir_model_data.get_object_reference('purchase', 'email_template_edi_purchase_done')[1]
            except ValueError:
                template_id = False
            # NOTE(review): compose_form_id is resolved but never used below.
            try:
                compose_form_id = ir_model_data.get_object_reference('mail', 'email_compose_message_wizard_form')[1]
            except ValueError:
                compose_form_id = False
            ctx = dict(self.env.context or {})
            ctx.update({
                'default_model': 'purchase.order',
                'default_res_id': purchase_order_obj.id,
                'default_use_template': bool(template_id),
                'default_template_id': template_id,
                'default_composition_mode': 'comment',
                'custom_layout': "purchase.mail_template_data_notification_email_purchase_order",
                'force_email': True
            })
            template_obj = self.env['mail.template'].browse(template_id)
            # NOTE(review): with_context(ctx=ctx) stores the dict under a
            # single 'ctx' key; with_context(**ctx) may have been intended —
            # confirm against the Odoo mail template API before changing.
            template_obj.with_context(ctx=ctx).send_mail(purchase_order_obj.id, force_send=True)
        return [purchase_order_obj.id, purchase_order_obj.name]
#vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
[
"54725593+RetargetingCL@users.noreply.github.com"
] |
54725593+RetargetingCL@users.noreply.github.com
|
3b9aabeae147893871834d281d7beb6941d1650e
|
8b1faf5c239723317d530420ab679496c5b56b7c
|
/qwe.py
|
9c5874ac760fc1e6e62d07efe633268598a2c186
|
[] |
no_license
|
mystery2828/pythonfiles
|
b42ccee621532e1900c580b684ecba17098c27dd
|
10ce88047f0226fcc6e49bc27a6af4b427e5b566
|
refs/heads/master
| 2021-07-23T14:34:43.967874
| 2021-07-07T17:14:24
| 2021-07-07T17:14:24
| 227,979,744
| 1
| 3
| null | 2020-10-03T08:51:31
| 2019-12-14T06:53:03
|
Python
|
UTF-8
|
Python
| false
| false
| 1,002
|
py
|
#code
def numpair(s1,s2):
    # Counts qualifying (s1 element, s2 element) pairs and prints the count.
    # Looks like the classic "count pairs where x^y > y^x" contest problem,
    # with ad-hoc corrections for the small values 0/1/2/3 — TODO confirm
    # against the original problem statement.
    # NOTE(review): the nesting below was reconstructed from a
    # whitespace-mangled dump; verify against the original file.
    count = 0
    s2.sort()  # sorted so the "all elements from i onward" shortcut works
    for ele in s1:
        for i in range(len(s2)):
            if ele != s2[i]:
                if ele == 0:
                    break
                if ele>1:
                    if s2[i]>ele:
                        # every remaining (larger) element also qualifies
                        count+=(len(s2)-i)
                        break
                if s2[i] == 1 and ele != 1:
                    count+=(s1.count(1))
                if ele == 2 and (s2[i] == 3 or s2[i] == 4):
                    count-=1
                if ele == 1:
                    count += s2.count(0)
                if ele == 3 and s2[i]==2:
                    count+=1
        continue
    print(count)
# Read the number of test cases; for each case consume the (unused) sizes
# line and the two integer sequences, then report the pair count.
num_cases = int(input())
for _ in range(num_cases):
    _sizes = input().split()  # sizes line — numpair does not need it
    first = [int(tok) for tok in input().split()]
    second = [int(tok) for tok in input().split()]
    numpair(first, second)
|
[
"noreply@github.com"
] |
noreply@github.com
|
85297224463e89bbcee3a6b86337b908c5929cb2
|
8a0e14299d8b915c0a909cf9fa9a86589dc63d76
|
/python/ray/tune/automl/__init__.py
|
cab4c4de4dab106306090e7cdc11ee1396f99abd
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
natashamjaques/ray
|
795e4271c3c5f3e261327afea40b81ffe6f362ac
|
aca9dd5ee7a8fef508a5383fdd26ad8ccdcb16e4
|
refs/heads/master
| 2020-04-12T05:58:15.680359
| 2019-03-06T22:08:10
| 2019-03-06T22:08:10
| 162,337,948
| 3
| 2
|
Apache-2.0
| 2018-12-18T19:47:02
| 2018-12-18T19:47:01
| null |
UTF-8
|
Python
| false
| false
| 464
|
py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from ray.tune.automl.genetic_searcher import GeneticSearch
from ray.tune.automl.search_policy import GridSearch, RandomSearch
from ray.tune.automl.search_space import SearchSpace, \
ContinuousSpace, DiscreteSpace
# Public API of ray.tune.automl, re-exported here for convenient importing.
__all__ = [
    "ContinuousSpace",
    "DiscreteSpace",
    "SearchSpace",
    "GridSearch",
    "RandomSearch",
    "GeneticSearch",
]
|
[
"rliaw@berkeley.edu"
] |
rliaw@berkeley.edu
|
927b1fcf7f529b1079513b2b00e403baa8765239
|
ce3077f0cf4e8db7d8541e371193b3b6ace8da28
|
/cherrypy.wsgi
|
a685cdd89b9b79cde929c40b4d646965072bca15
|
[] |
no_license
|
clarakosi/wsgi-benchmark
|
1989b5b6ba084462466b6085011a9f0b31f01d0c
|
200036807324b313f98997f301791514acf2720f
|
refs/heads/master
| 2020-04-08T17:27:34.307726
| 2018-12-05T16:18:16
| 2018-12-05T16:18:16
| 159,567,999
| 0
| 0
| null | 2018-12-05T16:23:14
| 2018-11-28T21:32:36
|
Python
|
UTF-8
|
Python
| false
| false
| 292
|
wsgi
|
from cheroot.wsgi import Server as WSGIServer, PathInfoDispatcher
from app import application
# Route every path to the WSGI application and serve it with cheroot's
# threaded HTTP server on all interfaces, port 5000.
d = PathInfoDispatcher({'/': application})
server = WSGIServer(('0.0.0.0', 5000), d)

if __name__ == '__main__':
    try:
        server.start()
    except KeyboardInterrupt:
        # Ctrl-C: shut the server down cleanly.
        server.stop()
|
[
"candrewwani@gmail.com"
] |
candrewwani@gmail.com
|
34e55c9eb4ff0873f56820e807557d8c6cb55bb7
|
214dde26c268d1d0b7991318c5e2d43aa27af89b
|
/visualization/capital_flow/app.py
|
9c72a241df388a8420f220fc97f2591d354deea3
|
[] |
no_license
|
hellobiek/smart_deal_tool
|
f1846903ac402257bbe92bd23f9552970937d50e
|
ba8aad0a37843362f5833526921c6f700fb881f1
|
refs/heads/master
| 2022-09-04T04:41:34.598164
| 2022-08-04T22:04:09
| 2022-08-04T22:04:09
| 88,258,362
| 36
| 14
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,214
|
py
|
# -*- coding: utf-8 -*-
import os
import sys
from os.path import abspath, dirname
sys.path.insert(0, dirname(dirname(dirname(abspath(__file__)))))
import dash
import dash_table
import const as ct
import pandas as pd
import dash_core_components as dcc
import dash_html_components as html
from flask_caching import Cache
from common import str_of_num
from rstock import RIndexStock
from cstock_info import CStockInfo
from visualization.dash.hgt import HGT
from visualization.dash.rzrq import RZRQ
from datetime import datetime, timedelta
from dash.dependencies import Input, Output
# Module-level state shared between Dash callbacks (populated lazily).
top100 = None
add_data = None
del_data = None
redis_host = "127.0.0.1"
dbinfo = ct.OUT_DB_INFO
mstart = None
mend = None
# Local data locations (absolute macOS volume paths).
model_dir = "/Volumes/data/quant/stock/data/models"
report_dir = "/Volumes/data/quant/stock/data/tdx/report"
cal_file_path = "/Volumes/data/quant/stock/conf/calAll.csv"
stocks_dir = "/Volumes/data/quant/stock/data/tdx/history/days"
bonus_path = "/Volumes/data/quant/stock/data/tdx/base/bonus.csv"
rvaluation_dir = "/Volumes/data/quant/stock/data/valuation/rstock"
base_stock_path = "/Volumes/data/quant/stock/data/tdx/history/days"
valuation_path = "/Volumes/data/quant/stock/data/valuation/reports.csv"
pledge_file_dir = "/Volumes/data/quant/stock/data/tdx/history/weeks/pledge"
report_publish_dir = "/Volumes/data/quant/stock/data/crawler/stock/financial/report_announcement_date"
tushare_file_path = "/Users/hellobiek/Documents/workspace/python/quant/smart_deal_tool/configure/tushare.json"
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
# The Dash app; callbacks are registered after layout creation, so
# suppress_callback_exceptions must be enabled.
app = dash.Dash(__name__, external_stylesheets = external_stylesheets, suppress_callback_exceptions = True)
# Flask-Caching backed by Redis, used to memoize the data-fetch helpers.
CACHE_CONFIG = {
    'CACHE_TYPE': 'redis',
    'CACHE_REDIS_URL': '127.0.0.1:6579'
}
cache = Cache()
cache.init_app(app.server, config=CACHE_CONFIG)
# Page layout: a date-range picker, two hidden divs that relay the chosen
# dates between callbacks, and a tab bar switching between the HK-connect
# (港股通) and margin-trading (融资融券) views.
app.layout = html.Div([
    html.H1('资金流情况'),
    dcc.DatePickerRange(
        id = 'date-picker-range',
        min_date_allowed = datetime(2017, 1, 1),
        max_date_allowed = datetime.now(),
        initial_visible_month = datetime.now(),
        start_date = datetime.now() - timedelta(7),
        end_date = datetime.now()
    ),
    html.Div(id='output-start-date', style={'display': 'none'}),
    html.Div(id='output-end-date', style={'display': 'none'}),
    dcc.Tabs(id="tabs", value='tabs', children=[
        dcc.Tab(label='港股通', value='hk-flow'),
        dcc.Tab(label='融资融券', value='leveraged-funds'),
    ]),
    html.Div(id='hold-situation', children='hgt-hold-situation-table'),
])
@cache.memoize()
def get_money_flow_data_from_rzrq(start, end):
    """Per-stock margin-trading (融资融券) flow for the two boundary dates.

    Returns (start_frame, end_frame, error): the error message is set and
    both frames are None when either date has no data.
    """
    rzrq_client = RZRQ(dbinfo = ct.OUT_DB_INFO, redis_host = redis_host, fpath = tushare_file_path)
    data = rzrq_client.get_data("ALL", start, end)
    if start not in set(data.date.tolist()):
        return None, None, "{} 没有数据".format(start)
    if end not in set(data.date.tolist()):
        return None, None, "{} 没有数据".format(end)
    # Normalize codes to the bare 6-digit form used by the other tables.
    data['code'] = data['code'].str[0:6]
    # rzcje = margin buy amount + repayment amount — presumably total
    # margin turnover; confirm against the RZRQ column definitions.
    data['rzcje'] = data['rzmre'] + data['rzche']
    data = data.reset_index(drop = True)
    rstock = RIndexStock(dbinfo = ct.OUT_DB_INFO, redis_host = redis_host)
    rstock_info = rstock.get_data(end)
    rstock_info = rstock_info.drop('date', axis = 1)
    stock_info_client = CStockInfo(dbinfo = ct.OUT_DB_INFO, redis_host = redis_host, stocks_dir = stocks_dir, base_stock_path = base_stock_path)
    base_df = stock_info_client.get()
    base_df = base_df[['code', 'name', 'timeToMarket', 'industry', 'sw_industry']]
    rstock_info = pd.merge(rstock_info, base_df, how='inner', on=['code'])
    df = pd.merge(data, rstock_info, how='left', on=['code'])
    # Float market cap; note 10e7 == 1e8 (亿) — TODO confirm the intended
    # scale factor.
    df['asserts'] = df['close'] * df['outstanding'] / 10e7
    df['asserts'] = round(df['asserts'], 2)
    df['rzye'] = round(df['rzye'], 2)
    df['rzcje'] = round(df['rzcje'], 2)
    df['rzche'] = round(df['rzche'], 2)
    df['rzmre'] = round(df['rzmre'], 2)
    df['rzrqye'] = round(df['rzrqye'], 2)
    df = df[['date', 'code', 'name', 'rzye', 'rzmre', 'rzche', 'rzrqye', 'rzcje', 'asserts', 'industry', 'sw_industry']]
    df = df.dropna(axis=0, how='any')
    df = df.reset_index(drop = True)
    # Split the combined frame back into the two boundary dates.
    s_data = df.loc[df.date == start]
    s_data = s_data.reset_index(drop = True)
    e_data = df.loc[df.date == end]
    e_data = e_data.reset_index(drop = True)
    return s_data, e_data, None
@cache.memoize()
def get_top20_stock_info_from_hgt(cdate):
    """Top HK-connect holdings for *cdate*, sorted by net buy turnover.

    Turnover columns are converted to human-readable strings via
    str_of_num before the frame is returned.
    """
    client = HGT(dbinfo = ct.OUT_DB_INFO, redis_host = redis_host)
    df = client.get_top10_info(cdate)
    df['net_turnover'] = df['buy_turnover'] - df['sell_turnover']
    df = (df.sort_values(by = 'net_turnover', ascending = False)
            .drop('rank', axis = 1)
            .reset_index(drop = True))
    for column in ('total_turnover', 'net_turnover', 'buy_turnover', 'sell_turnover'):
        df[column] = df[column].apply(str_of_num)
    return df
@cache.memoize()
def get_money_flow_data_from_hgt(start, end):
    """Compute HK-connect (港股通) holding ratios between two dates.

    On success returns a 3-tuple:
      * top100   -- stocks whose holding ratio on `end` exceeds 5%
      * add_data -- 30 stocks with the largest ratio increase start -> end
      * del_data -- 30 stocks with the largest ratio decrease
    On missing data the same 3-slot tuple carries (None, None, error_message).
    """
    hgt_client = HGT(dbinfo = ct.OUT_DB_INFO, redis_host = redis_host)
    sh_data = hgt_client.get_data("ALL_SH", start, end)
    sz_data = hgt_client.get_data("ALL_SZ", start, end)
    # Availability is checked against the Shanghai feed only; assumes the
    # Shenzhen feed covers the same dates -- TODO confirm.
    if start not in sh_data.date.tolist():
        return None, None, "{} 没有数据".format(start)
    if end not in sh_data.date.tolist():
        return None, None, "{} 没有数据".format(end)
    # Keep only the two boundary dates, then stack SH + SZ into one frame.
    sh_data = sh_data.loc[(sh_data.date == start) | (sh_data.date == end)]
    sz_data = sz_data.loc[(sz_data.date == start) | (sz_data.date == end)]
    # NOTE: DataFrame.append is removed in pandas >= 2.0 (use pd.concat).
    sh_data = sh_data.append(sz_data)
    sh_data = sh_data.reset_index(drop = True)
    rstock = RIndexStock(dbinfo = ct.OUT_DB_INFO, redis_host = redis_host)
    rstock_info = rstock.get_data(end)
    rstock_info = rstock_info[['code', 'totals']]
    stock_info_client = CStockInfo(dbinfo = ct.OUT_DB_INFO, redis_host = redis_host, stocks_dir = stocks_dir, base_stock_path = base_stock_path)
    base_df = stock_info_client.get()
    base_df = base_df[['code', 'timeToMarket', 'industry', 'sw_industry']]
    rstock_info = pd.merge(rstock_info, base_df, how='inner', on=['code'])
    df = pd.merge(sh_data, rstock_info, how='left', on=['code'])
    df = df.dropna(axis=0, how='any')
    df = df.reset_index(drop = True)
    # Holding ratio: HK-connect-held volume as a percentage of total shares.
    df['percent'] = 100 * df['volume'] / df['totals']
    df = df[['date', 'code', 'name', 'timeToMarket', 'industry', 'sw_industry', 'percent', 'volume', 'totals']]
    start_data = df.loc[df.date == start]
    start_data = start_data.sort_values(by = 'percent', ascending= False)
    start_data = start_data.reset_index(drop = True)
    end_data = df.loc[df.date == end]
    end_data = end_data.sort_values(by = 'percent', ascending= False)
    end_data = end_data.reset_index(drop = True)
    # Despite the name, "top100" is every stock above a 5% holding ratio,
    # not a fixed count of 100.
    top100 = end_data.loc[end_data.percent > 5]
    top100 = top100.reset_index(drop = True)
    top100['percent'] = round(top100['percent'], 2)
    # Join the start-date ratio back on so we can compute the delta.
    start_data = start_data[['code', 'percent']]
    start_data = start_data.rename(columns = {"percent": "spercent"})
    cdata = pd.merge(end_data, start_data, how='left', on=['code'])
    cdata = cdata.dropna(axis=0, how='any')
    cdata['delta_percent'] = cdata['percent'] - cdata['spercent']
    cdata = cdata[['date', 'code', 'name', 'timeToMarket', 'industry', 'sw_industry', 'delta_percent', 'volume', 'totals']]
    cdata['delta_percent'] = round(cdata['delta_percent'], 2)
    cdata = cdata.sort_values(by = 'delta_percent', ascending= False)
    cdata = cdata.reset_index(drop = True)
    # Top 30 increases and top 30 decreases of the holding ratio.
    add_data = cdata.loc[cdata.delta_percent > 0]
    add_data = add_data.sort_values(by = 'delta_percent', ascending= False)
    add_data = add_data.head(30)
    add_data = add_data.reset_index(drop = True)
    del_data = cdata.loc[cdata.delta_percent < 0]
    del_data = del_data.sort_values(by = 'delta_percent', ascending= True)
    del_data = del_data.head(30)
    del_data = del_data.reset_index(drop = True)
    return top100, add_data, del_data
@app.callback(
    [Output('output-start-date', 'children'), Output('output-end-date', 'children')],
    [Input('date-picker-range', 'start_date'), Input('date-picker-range', 'end_date')])
def update_date(start_date, end_date):
    """Normalize the date-picker values to YYYY-MM-DD and publish them.

    Also mirrors the values into the module-level mstart/mend globals.
    Returns (None, None) until both dates are chosen.
    """
    global mstart, mend
    if start_date is None or end_date is None:
        return None, None
    # Drop any time-of-day portion; keep just the date part.
    mstart = start_date.split(' ')[0]
    mend = end_date.split(' ')[0]
    return mstart, mend
@app.callback(Output('hold-situation', 'children'),
              [Input('tabs', 'value'), Input('output-start-date', 'children'), Input('output-end-date', 'children')])
def render_content(model_name, start_date, end_date):
    """Render the table panel for the currently selected tab.

    model_name -- 'hk-flow' (HK connect) or 'leveraged-funds' (margin trading)
    start_date / end_date -- date strings propagated from the date picker.
    """
    if model_name == 'hk-flow':
        # Published as module-level globals so other code can reuse the frames.
        global top100, add_data, del_data
        top100, add_data, del_data = get_money_flow_data_from_hgt(start_date, end_date)
        top20_info = get_top20_stock_info_from_hgt(end_date)
        if top20_info is None or top20_info.empty:
            return html.Div([html.H3('{} : 二十大热门股没有数据'.format(end_date))])
        else:
            if top100 is None:
                # HGT flow data missing: still show the top-20 table, plus an
                # error heading (del_data holds the error message here).
                return html.Div([
                    html.H3('{}日的20大成交额股票(按照净买入额排序)'.format(end_date)),
                    dash_table.DataTable(
                        id = 'hgt-top20-data',
                        columns = [{"name": i, "id": i} for i in top20_info.columns],
                        data = top20_info.to_dict('records'),
                        style_cell={'textAlign': 'center'},
                        sort_action = "native",
                    ),
                    html.H3('{}: 港股通数据有错误'.format(end_date))])
            else:
                return html.Div([
                    html.H3('{}日的20大成交额股票(按照净买入额排序)'.format(end_date)),
                    dash_table.DataTable(
                        id = 'hgt-top20-data',
                        columns = [{"name": i, "id": i} for i in top20_info.columns],
                        data = top20_info.to_dict('records'),
                        style_cell={'textAlign': 'center'},
                        sort_action = "native",
                    ),
                    html.H3('{}日持股比例最多的100只股票(持有股本/总股本)'.format(end_date)),
                    dash_table.DataTable(
                        id = 'hgt-data',
                        columns = [{"name": i, "id": i} for i in top100.columns],
                        data = top100.to_dict('records'),
                        style_cell={'textAlign': 'center'},
                        sort_action = "native",
                    ),
                    html.H3('持股比例增加最多的30只股票(持有股本/总股本)'),
                    dash_table.DataTable(
                        id = 'hgt-add-data',
                        columns = [{"name": i, "id": i} for i in add_data.columns],
                        data = add_data.to_dict('records'),
                        style_cell={'textAlign': 'center'},
                        sort_action = "native",
                    ),
                    html.H3('持股比例减少最多的30只股票(持有股本/总股本)'),
                    dash_table.DataTable(
                        id = 'hgt-del-data',
                        columns = [{"name": i, "id": i} for i in del_data.columns],
                        data = del_data.to_dict('records'),
                        style_cell={'textAlign': 'center'},
                        sort_action = "native",
                    ),
                ])
    elif model_name == 'leveraged-funds':
        s_data, e_data, msg = get_money_flow_data_from_rzrq(start_date, end_date)
        if s_data is None or e_data is None:
            return html.Div([html.H3(msg)])
        # Join start/end margin balances to rank the 30 biggest changes.
        add_data = e_data[['code', 'name', 'rzrqye', 'industry']]
        add_data = add_data.rename(columns = {"rzrqye": "end_rzrqye"})
        del_data = s_data[['code', 'rzrqye']]
        del_data = del_data.rename(columns = {"rzrqye": "start_rzrqye"})
        df = pd.merge(add_data, del_data, how='left', on=['code'])
        df['delta_rzrqye'] = round(df['end_rzrqye'] - df['start_rzrqye'], 2)
        df = df[['code', 'name', 'industry', 'start_rzrqye', 'end_rzrqye', 'delta_rzrqye']]
        add_data = df.nlargest(30, 'delta_rzrqye')
        # Negate to select the 30 most-negative deltas, then restore the sign.
        df['delta_rzrqye'] = df['delta_rzrqye'] * -1
        del_data = df.nlargest(30, 'delta_rzrqye')
        del_data['delta_rzrqye'] = del_data['delta_rzrqye'] * -1
        return html.Div([
            html.H3('{}日的融资成交额股票(按照净买入额排序)'.format(end_date)),
            dash_table.DataTable(
                id = 'rzrq-data',
                columns = [{"name": i, "id": i} for i in e_data.columns],
                data = e_data.to_dict('records'),
                style_cell={'textAlign': 'center'},
                sort_action = "native",
            ),
            html.H3('持股比例增加最多的30只股票(融资融券余额/流通市值)'),
            dash_table.DataTable(
                id = 'rzrq-add-data',
                columns = [{"name": i, "id": i} for i in add_data.columns],
                data = add_data.to_dict('records'),
                style_cell={'textAlign': 'center'},
                sort_action = "native",
            ),
            html.H3('持股比例减少最多的30只股票(融资融券余额/流通市值)'),
            dash_table.DataTable(
                id = 'rzrq-del-data',
                columns = [{"name": i, "id": i} for i in del_data.columns],
                data = del_data.to_dict('records'),
                style_cell={'textAlign': 'center'},
                sort_action = "native",
            ),
        ])
if __name__ == '__main__':
    # Dash development server only; use a production WSGI server to deploy.
    app.run_server(debug = True, port = 9998)
|
[
"hellobiek@gmail.com"
] |
hellobiek@gmail.com
|
9951bb6bb6601a27c6c70eacedafd833535407ea
|
2dae88207b8b424617f8e283ede17616e8009881
|
/Algorithms/Contests/MagicLines/polar.py
|
f304fbf0ba4fc9f54136fa9439aaff66f9f59e67
|
[] |
no_license
|
ramvibhakar/hacker_rank
|
32083851190a40f969fd6a657633c68e7ecbe1d5
|
2b169b1cfbf43bc4aca0a4a6921f77befef7ab85
|
refs/heads/master
| 2016-09-10T09:31:05.397702
| 2015-06-25T11:27:18
| 2015-06-25T11:27:18
| 28,509,731
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 513
|
py
|
# Enter your code here. Read input from STDIN. Print output to STDOUT
from math import hypot, atan2, pi
def polar(x, y):
    """Return the polar form of (x, y) as (angle_degrees, radius).

    The angle is measured counter-clockwise from the positive x-axis and
    normalized into the range [0, 360).
    """
    angle = 180.0 * atan2(y, x) / pi
    if angle < 0:
        angle += 360
    radius = hypot(x, y)
    return angle, radius
n = int(raw_input())
ans1 = []
ans = []
for i in range(n):
x,y = map(int,raw_input().split())
ans.append([x,y])
ans1.append(polar(x,y))
#print polar(3,5)
ans = zip(ans,ans1)
for i in ind:
print ans[i][0],ans[i][1]
# Enter your code here. Read input from STDIN. Print output to STDOUT
|
[
"ramvibhakar@gmail.com"
] |
ramvibhakar@gmail.com
|
3b8b3870dabed4b7af889b4f5e0cb43e93880dae
|
39d78a4215957a63602289f7ed2ad414f91cf604
|
/function_library.py
|
598a2305d336b52568e7ff688392fa900075711b
|
[] |
no_license
|
ElinorBaker/Team9
|
61c2a194e60f4ae3c465429a9b49cca288a049f1
|
82685efa60ad8f226b41c1edf7638039f167eb46
|
refs/heads/main
| 2023-04-11T19:46:17.722515
| 2021-05-04T18:46:45
| 2021-05-04T18:46:45
| 359,920,347
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 964
|
py
|
# Function 1: average
def average(av):
    """Return the arithmetic mean of the values in *av*."""
    total = sum(av)
    count = len(av)
    return total / count
# Function 2: go back to MAIN MENU
def another_stat():
    """Ask whether to return to the MAIN MENU.

    Returns True for "Y" and exits the program for "N".
    NOTE(review): an invalid answer returns the raw input string, which is
    truthy, so callers doing `if another_stat():` treat it like "Y" --
    confirm this is intended.
    """
    top_loop_question = input("Would you like to go back to the MAIN MENU to see another statistic? (Y/N)").upper()
    if top_loop_question == "N":
        print("Goodbye!")
        exit()
    elif top_loop_question not in ["Y", "N"]:
        print('Invalid response. Transferring you back to the MAIN MENU.')
        return top_loop_question
    return top_loop_question == "Y"
# Function 3: another month
def another_month():
    """Ask whether to show this statistic for another month.

    Exits the program on "N"; returns None on "Y".
    NOTE(review): any answer other than "Y"/"N" also falls through and
    returns None, indistinguishable from "Y" for the caller -- confirm.
    """
    another_month_question = input("Would you like to see this statistic for another month? (Y/N)").upper()
    if another_month_question == "N":
        print("Goodbye!")
        exit()
    # Ideally instead of exit() would be function another_stat(), but nested loop 'return' does not go where expected...
    elif another_month_question == "Y":
        return
|
[
"lauk3158@gmail.com"
] |
lauk3158@gmail.com
|
c2e8140fedf0f36838a456913d4b80fc6f7a9708
|
375aacf49a295e08b9b01fa4b7681fbb3a92d1c3
|
/forallpeople/dimensions.py
|
b5367fee5bc99cb706006cd209a9688623914b5b
|
[
"Apache-2.0"
] |
permissive
|
bevi-rosso/forallpeople
|
5653386f69eeb2bb576d5a62231db39b77412b07
|
f3bb2da1aa505dfd1dd0335b8d58f74e1305542c
|
refs/heads/master
| 2023-02-26T03:21:54.368851
| 2021-01-13T05:02:33
| 2021-01-13T05:02:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 760
|
py
|
# Copyright 2020 Connor Ferster
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import NamedTuple
# Exponents of the seven SI base quantities carried by a physical value.
Dimensions = NamedTuple(
    "Dimensions",
    [
        ("kg", float),   # mass
        ("m", float),    # length
        ("s", float),    # time
        ("A", float),    # electric current
        ("cd", float),   # luminous intensity
        ("K", float),    # thermodynamic temperature
        ("mol", float),  # amount of substance
    ],
)
|
[
"CFerster@rjc.ca"
] |
CFerster@rjc.ca
|
a89b111b84cc255fa97bb88a053f3f5a5369505e
|
6a2c2683279c5445e49d4a1490ba5e58ba6911f3
|
/vis/vis3.py
|
76c803011075c249bb07e297383c3c6d8f883445
|
[] |
no_license
|
foongminwong/python-visualization-practice
|
ba3e59335719784a6d02f44483c59b43495f7716
|
ab593440aab01e8095abcb6442304bf620d028d0
|
refs/heads/master
| 2020-03-28T14:26:29.782317
| 2019-05-25T16:50:37
| 2019-05-25T16:50:37
| 148,486,862
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 856
|
py
|
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
import matplotlib.pyplot as plt
import numpy as np
import bertini_real
# Plot the first sampled surface triangle from the most recent
# bertini_real decomposition.
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection='3d', aspect=1)

sphere_data = bertini_real.data.ReadMostRecent()
sphere_tuples = sphere_data.surface.surface_sampler_data

# Vertex indices of the first triangle.
f = int(sphere_tuples[0][0])
s = int(sphere_tuples[0][1])
t = int(sphere_tuples[0][2])

f1 = sphere_data.vertices[f]
s1 = sphere_data.vertices[s]
t1 = sphere_data.vertices[t]

# Real parts of each vertex's (x, y, z) coordinates.
fx = f1['point'][0].real
fy = f1['point'][1].real
fz = f1['point'][2].real
sx = s1['point'][0].real
sy = s1['point'][1].real
sz = s1['point'][2].real
tx = t1['point'][0].real
ty = t1['point'][1].real
tz = t1['point'][2].real

# BUG FIX: scatter/plot3D take per-AXIS lists (all x's, all y's, all z's).
# The original passed per-vertex triples ([fx, fy, fz], ...) which plots
# three wrong points made of mixed coordinates.
xs = [fx, sx, tx]
ys = [fy, sy, ty]
zs = [fz, sz, tz]

ax.scatter(xs, ys, zs)
ax.plot3D(xs, ys, zs)
plt.show()
|
[
"wongfoongmin@hotmail.com"
] |
wongfoongmin@hotmail.com
|
658eaf75d73117092aafab19e19d36afddfcf710
|
329e3aa7ee29f19e8648bd4807d43db23e77b70c
|
/src/sendmail.py
|
bce2f717864f0d63c3a01f147b090cf47884916a
|
[] |
no_license
|
bowdbeg/payslip
|
cc013c1c48e5993e7a30bab9659c7b30f27bfd28
|
239a5f90df0ab4df0096bff37df6cd9a2d89e9d2
|
refs/heads/master
| 2022-12-30T14:42:50.328748
| 2020-10-17T15:47:17
| 2020-10-17T15:47:17
| 291,071,158
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,932
|
py
|
"""
Send E-Mail with GMail.
Usage:
sendmail.py <sender> <to> <subject> <message_text_file_path> [--attach_file_path=<file_path>] [--cc=<cc>]
sendmail.py -h | --help
sendmail.py --version
Options:
-h --help Show this screen.
--version Show version.
--attach_file_path=<file_path> Path of file attached to message.
--cc=<cc> cc email address list(separated by ','). Default None.
"""
import pickle
import os.path
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
import base64
from email.mime.base import MIMEBase
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
from email.mime.audio import MIMEAudio
from pathlib import Path
from email.mime.multipart import MIMEMultipart
import mimetypes
from apiclient import errors
from gmail_credential import get_credential
from docopt import docopt
import logging
logger = logging.getLogger(__name__)
def create_message(sender, to, subject, message_text, cc=None):
    """Build a Gmail API message dict.

    The plain-text body is wrapped in a UTF-8 MIMEText, addressed, and
    base64url-encoded into the {"raw": ...} shape the API expects.
    """
    charset_name = "utf-8"
    mime = MIMEText(message_text.encode(charset_name), _charset=charset_name)
    mime["to"] = to
    mime["from"] = sender
    mime["subject"] = subject
    if cc:
        mime["Cc"] = cc
    raw = base64.urlsafe_b64encode(mime.as_bytes()).decode()
    return {"raw": raw}
def create_message_with_attachment(
    sender, to, subject, message_text, file_path, cc=None
):
    """Build a base64url-encoded MIME multipart message with one attachment.

    The text body and the file at *file_path* are combined into a
    MIMEMultipart message and returned as {"raw": <base64url str>},
    the shape the Gmail API expects.
    """
    message = MIMEMultipart()
    message["to"] = to
    message["from"] = sender
    message["subject"] = subject
    if cc:
        message["Cc"] = cc
    # attach message text
    enc = "utf-8"
    msg = MIMEText(message_text.encode(enc), _charset=enc)
    message.attach(msg)
    # Pick a MIME type from the file name; anything unguessable (or
    # compressed) is sent as a generic binary stream.
    content_type, encoding = mimetypes.guess_type(file_path)
    if content_type is None or encoding is not None:
        content_type = "application/octet-stream"
    main_type, sub_type = content_type.split("/", 1)
    if main_type == "text":
        with open(file_path, "rb") as fp:
            # NOTE(review): bytes are passed to MIMEText here, which expects
            # str on Python 3 -- confirm the text-attachment path works.
            msg = MIMEText(fp.read(), _subtype=sub_type)
    elif main_type == "image":
        with open(file_path, "rb") as fp:
            msg = MIMEImage(fp.read(), _subtype=sub_type)
    elif main_type == "audio":
        with open(file_path, "rb") as fp:
            msg = MIMEAudio(fp.read(), _subtype=sub_type)
    else:
        with open(file_path, "rb") as fp:
            msg = MIMEBase(main_type, sub_type)
            msg.set_payload(fp.read())
    p = Path(file_path)
    msg.add_header("Content-Disposition", "attachment", filename=p.name)
    message.attach(msg)
    encode_message = base64.urlsafe_b64encode(message.as_bytes())
    return {"raw": encode_message.decode()}
def send_message(service, user_id, message):
    """Send an encoded message through the Gmail API.

    Parameters
    ----------
    service : googleapiclient.discovery.Resource
        Resource used to communicate with Gmail.
    user_id : str
        The user's id ("me" denotes the authenticated user).
    message : dict
        Dict with key "raw" whose value is a base64url-encoded MIME object.

    Returns
    -------
    None

    Raises
    ------
    errors.HttpError
        Re-raised after logging when the API call fails.
    """
    try:
        sent_message = (
            service.users().messages().send(userId=user_id, body=message).execute()
        )
        logger.info("Message Id: %s" % sent_message["id"])
        return None
    except errors.HttpError as error:
        logger.info("An error occurred: %s" % error)
        raise error
# Main entry: authenticate against Gmail and send a single message.
def main(sender, to, subject, message_text, attach_file_path, cc=None):
    """Obtain credentials, build the message, and send it via Gmail."""
    creds = get_credential()
    service = build("gmail", "v1", credentials=creds, cache_discovery=False)
    # Choose the multipart builder only when there is a file to attach.
    if attach_file_path:
        body = create_message_with_attachment(
            sender, to, subject, message_text, attach_file_path, cc=cc
        )
    else:
        body = create_message(sender, to, subject, message_text, cc=cc)
    send_message(service, "me", body)
# Script entry point: parse docopt arguments and send the mail.
if __name__ == "__main__":
    arguments = docopt(__doc__, version="0.1")
    sender = arguments["<sender>"]
    to = arguments["<to>"]
    cc = arguments["--cc"]
    subject = arguments["<subject>"]
    message_text_file_path = arguments["<message_text_file_path>"]
    attach_file_path = arguments["--attach_file_path"]
    logging.basicConfig(level=logging.DEBUG)
    # The message body is read from a UTF-8 text file.
    with open(message_text_file_path, "r", encoding="utf-8") as fp:
        message_text = fp.read()
    main(
        sender=sender,
        to=to,
        subject=subject,
        message_text=message_text,
        attach_file_path=attach_file_path,
        cc=cc,
    )
|
[
"bear.kohei@gmail.com"
] |
bear.kohei@gmail.com
|
4634ce50332a8d3d3fcb0fbb8f5602a8e407e4f8
|
30b3cefec980c94329dff3ddb6760b4c45fc630e
|
/testing_descriptor.py
|
f3bbbae21834f451a650d70f0efdf9ace708ef5e
|
[] |
no_license
|
Ebajaj147/Image-Descriptor
|
c92d80f89d71cbe4f67be28cab043fe862dbad69
|
6f2223ab09bb0214a82dafd1dbcbffb3d243e5ed
|
refs/heads/main
| 2023-03-21T11:32:59.099966
| 2021-03-17T09:10:29
| 2021-03-17T09:10:29
| 346,029,546
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,057
|
py
|
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.applications.xception import Xception
from keras.models import load_model
from pickle import load
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
import argparse
# CLI: a single required argument, the path of the image to caption.
ap = argparse.ArgumentParser()
ap.add_argument('-i', '--image', required=True, help="Image Path")
args = vars(ap.parse_args())
img_path = args['image']
def extract_features(filename, model):
    """Load an image, preprocess it for Xception, and return model features.

    The image is resized to 299x299, stripped to 3 channels if it has
    alpha, scaled to [-1, 1], and passed through *model*.predict.
    """
    try:
        image = Image.open(filename)
    except Exception:
        print("ERROR: Could not open the image! Make sure the image path and extension is correct")
        # BUG FIX: the original swallowed the error and fell through to use
        # the undefined `image`, crashing with a confusing UnboundLocalError.
        # Re-raise so the real failure reaches the caller.
        raise
    image = image.resize((299,299))
    image = np.array(image)
    # for images that have 4 channels, we convert them into 3 channels
    if image.shape[2] == 4:
        image = image[..., :3]
    image = np.expand_dims(image, axis=0)
    # Xception preprocessing: scale pixel values from [0, 255] to [-1, 1].
    image = image/127.5
    image = image - 1.0
    feature = model.predict(image)
    return feature
def word_for_id(integer, tokenizer):
    """Map a token index back to its word; None when the index is unknown."""
    matches = (w for w, idx in tokenizer.word_index.items() if idx == integer)
    return next(matches, None)
def generate_desc(model, tokenizer, photo, max_length):
    """Greedily decode a caption for *photo*, one word per step.

    Starts from the 'start' token, repeatedly feeds the partial caption
    (padded to max_length) plus the photo features to the model, and stops
    at the 'end' token, an unknown index, or after max_length words.
    """
    in_text = 'start'
    for i in range(max_length):
        sqr = tokenizer.texts_to_sequences([in_text])[0]
        sqr = pad_sequences([sqr], maxlen=max_length)
        pred = model.predict([photo,sqr], verbose=0)
        # Greedy decoding: take the single most probable next word.
        pred = np.argmax(pred)
        word = word_for_id(pred, tokenizer)
        if word is None:
            break
        in_text += ' ' + word
        if word == 'end':
            break
    return in_text
#path = 'Flicker8k_Dataset/img1.jpg'
# Maximum caption length the model was trained with.
max_length = 32
tkzr = load(open("tokenizer.p","rb"))
mod = load_model('models/model_9.h5')
# Feature extractor: Xception without its classification head.
xception_model = Xception(include_top=False, pooling="avg")
pic = extract_features(img_path, xception_model)
img = Image.open(img_path)
desc = generate_desc(mod, tkzr, pic, max_length)
print("\n\n")
print(desc)
plt.imshow(img)
|
[
"noreply@github.com"
] |
noreply@github.com
|
2710cb22af5cac80f7a27b0cbd8720daac4d6440
|
41c6fceaf52da397f045b58e686d4b2065a73f6e
|
/demo3/routes/profile.py
|
211cbe5dcd33f9f699f1da880dbdd953150a48b5
|
[] |
no_license
|
TaihouAnF/CSCC01_PickEasy_team01_project
|
269270cfec76691bcafd5a36ef8a72c6501ea069
|
307ea0dec7779d5614ad5757352aa5161be230d7
|
refs/heads/master
| 2023-07-26T21:41:59.767658
| 2020-08-16T22:26:19
| 2020-08-16T22:26:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,344
|
py
|
###################################################
# #
# Includes all routes to profile pages. #
# #
###################################################
from flask import Flask, render_template, request, redirect, url_for, session, Blueprint
profile_page = Blueprint('profile_page', __name__, template_folder='templates')
from databaseHelpers.user import *
from databaseHelpers.restaurant import *
@profile_page.route('/profile.html')
@profile_page.route('/profile')
def profile():
    """Show the profile page; restaurant owners also see restaurant info."""
    # If someone is not logged in redirects them to login page
    if 'account' not in session:
        return redirect(url_for('login_page.login'))
    user = get_user(session['account'])
    # BUG FIX: owner status is stored in session['type'] (see
    # edit_restaurant_info below); session['account'] is the account id,
    # so comparing it to 1 only matched account #1.
    if session['type'] == 1:
        rid = get_rid(session['account'])
        rname = get_restaurant_name_by_rid(rid)
        raddress = get_restaurant_address(rid)
        return render_template('profile.html', rname = rname, raddress = raddress, user = user)
    return render_template('profile.html', user = user)
@profile_page.route('/editRestaurantInfo.html', methods=['GET', 'POST'])
@profile_page.route('/editRestaurantInfo', methods=['GET', 'POST'])
def edit_restaurant_info():
    """Let a restaurant owner view and update the restaurant name/address."""
    # If someone is not logged in redirects them to login page
    if 'account' not in session:
        return redirect(url_for('login_page.login'))
    # if user is a restaurant owner
    if session['type'] == 1:
        rid = get_rid(session['account'])
        rname = get_restaurant_name_by_rid(rid)
        raddress = get_restaurant_address(rid)
        if request.method == 'POST':
            rname = request.form['rname']
            raddress = request.form['address']
            restaurant = get_resturant_by_rid(rid)
            # errmsg is falsy on success, a message string on failure.
            errmsg = update_restaurant_information(restaurant, rname, raddress)
            if not errmsg:
                return redirect(url_for('profile_page.profile'))
            return render_template('editRestaurantInfo.html', rname = rname, raddress = raddress, errmsg = errmsg)
        return render_template('editRestaurantInfo.html', rname = rname, raddress = raddress)
    else:
        # Non-owners have no restaurant to edit.
        return redirect(url_for('home_page.home'))
|
[
"noreply@github.com"
] |
noreply@github.com
|
a7bba7e1fb6c729e6c1637759c62f88f35c6ff96
|
4bc29617a307de54a7fe300c8e863f03321bd003
|
/lib/python3.8/site-packages/trytond/__init__.py
|
4c039bb08254fc28ff51e691190a50d47b7b35e2
|
[] |
no_license
|
Davidoff2103/tryton-training
|
f594970e77646f0ffeb42eb4f903252ff0b6c201
|
8d1ec4f2b623f7ca48f38bfda2ac15c01ded35a7
|
refs/heads/master
| 2023-06-01T11:55:05.400233
| 2021-06-09T10:06:56
| 2021-06-09T10:06:56
| 375,275,666
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 518
|
py
|
# This file is part of Tryton. The COPYRIGHT file at the top level of
# this repository contains the full copyright notices and license terms.
import os
import time
import warnings
from email import charset

__version__ = "5.0.35"

# Force the process timezone to UTC so all datetimes are UTC-based.
os.environ['TZ'] = 'UTC'
if hasattr(time, 'tzset'):  # tzset is not available on Windows
    time.tzset()
if time.tzname[0] != 'UTC':
    warnings.warn('Timezone must be set to UTC instead of %s' % time.tzname[0])
# set email encoding for utf-8 to 'quoted-printable'
charset.add_charset('utf-8', charset.QP, charset.QP)
|
[
"davidoff.d777@gmail.com"
] |
davidoff.d777@gmail.com
|
c618f3a535441e5c8587f2f8d2c91d6c2a046dd8
|
113f8ae533a75e9f2fdc1728661af0f19c8460a6
|
/books_app/books_app/settings.py
|
8f53b3a945f604d8a773d85e73cdd69c268b132c
|
[] |
no_license
|
PeterM358/Python-web-2021
|
cf08beaa3330495afc53e640f4a2aaf0429049e9
|
a3b7e1d1be0cc85675aaff646917d4f5b7f97b00
|
refs/heads/master
| 2023-07-09T15:09:08.868548
| 2021-07-24T13:49:22
| 2021-07-24T13:49:22
| 382,328,747
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,403
|
py
|
"""
Django settings for books_app project.
Generated by 'django-admin startproject' using Django 3.2.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-e05f*w&+x@+@w7-9g*7z!4^%7u+xmeb9uxz*j@!kz(e5max0c('
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'books_app.books',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'books_app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR / 'templates']
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'books_app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'books',
'USER': 'postgres',
'PASSWORD': 'asdf1234',
'HOST': '127.0.0.1',
'PORT': '5432',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
|
[
"estestveno"
] |
estestveno
|
f694103ad29e76cd74411c21fb687a6e63fcbdbf
|
6bda8a6e44d09397ada6ed222800e16f071674bf
|
/src/numdifftools/profiletools.py
|
4e6374add0e9bed8d01cf6a6f24116263cc71f59
|
[
"BSD-3-Clause"
] |
permissive
|
pbrod/numdifftools
|
557af2ee288339737a9e005fb0485542c13e8891
|
4f62e51d4776cc6acbdfb6268482635a487b860c
|
refs/heads/master
| 2023-07-20T19:26:53.241589
| 2022-11-14T13:39:42
| 2022-11-14T13:39:42
| 17,676,169
| 237
| 52
|
BSD-3-Clause
| 2023-07-05T15:21:37
| 2014-03-12T17:31:06
|
Python
|
UTF-8
|
Python
| false
| false
| 5,763
|
py
|
"""
This module is based on: https://zapier.com/engineering/profiling-python-boss/
See also:
https://www.pythoncentral.io/measure-time-in-python-time-time-vs-time-clock/
"""
from __future__ import absolute_import, print_function
import inspect
import cProfile
from functools import wraps
from timeit import default_timer as timer
import warnings
try:
    from line_profiler import LineProfiler

    def _add_all_class_methods(profiler, cls, except_=''):
        # Register every bound method of cls except the one named `except_`.
        for k, v in inspect.getmembers(cls, inspect.ismethod):
            if k != except_:
                profiler.add_function(v)

    def _add_function_or_classmethod(profiler, f, args):
        # A string names a method on the class instance (args[0]);
        # anything else is treated as a function object.
        if isinstance(f, str):  # f is a method of the
            cls = args[0]  # class instance
            profiler.add_function(getattr(cls, f))
        else:
            profiler.add_function(f)

    def do_profile(follow=(), follow_all_methods=False):
        """
        Decorator to profile a function or class method

        It uses line_profiler to give detailed reports on time spent on each
        line in the code.

        Pros: has intuitive and finely detailed reports. Can follow
        functions in third party libraries.

        Cons:
            has external dependency on line_profiler and is quite slow,
            so don't use it for benchmarking.

        Handy tip:
            Just decorate your test function or class method and pass any
            additional problem function(s) in the follow argument!
            If any follow argument is a string, it is assumed that the string
            refers to bound a method of the class

        See also
        --------
        do_cprofile, test_do_profile
        """
        def inner(func):
            def profiled_func(*args, **kwargs):
                try:
                    profiler = LineProfiler()
                    profiler.add_function(func)
                    if follow_all_methods:
                        cls = args[0]  # class instance
                        _add_all_class_methods(profiler, cls,
                                               except_=func.__name__)
                    for f in follow:
                        _add_function_or_classmethod(profiler, f, args)
                    profiler.enable_by_count()
                    return func(*args, **kwargs)
                finally:
                    # Stats are printed even when func raises.
                    profiler.print_stats()
            return profiled_func
        return inner
except ImportError as error:
    # line_profiler is unavailable: install a no-op stand-in so code
    # decorated with @do_profile still runs.
    LineProfiler = None
    warnings.warn(str(error))

    def do_profile(follow=(), follow_all_methods=False):
        "Helpful if you accidentally leave in production!"
        def inner(func):
            def nothing(*args, **kwargs):
                return func(*args, **kwargs)
            return nothing
        return inner
def timefun(fun):
    """Decorator that prints the wall-clock duration of every call to *fun*.

    Wrap a few high-level functions first to locate a bottleneck, then move
    the decorator inward and repeat. For benchmarking small snippets the
    `timeit` module is usually the better tool: timers like this are simple
    and portable, but placing and removing them by hand gets tedious on
    very large codebases.
    """
    @wraps(fun)
    def timed_call(*args, **kwargs):
        started = timer()
        result = fun(*args, **kwargs)
        elapsed = timer() - started
        print("@timefun:" + fun.__name__ + " took " + str(elapsed) + " seconds")
        return result
    return timed_call
class TimeWith():
    """Context manager / stopwatch that prints elapsed time at checkpoints.

    The clock starts at construction; `checkpoint` prints the running total
    and exiting the `with` block prints a final 'finished' checkpoint.
    """

    def __init__(self, name=''):
        self.name = name
        self.start = timer()

    @property
    def elapsed(self):
        """Seconds elapsed since this timer was created."""
        now = timer()
        return now - self.start

    def checkpoint(self, name=''):
        """Print elapsed seconds labelled with the timer and checkpoint names."""
        message = '{timer} {checkpoint} took {elapsed} seconds'.format(
            timer=self.name,
            checkpoint=name,
            elapsed=self.elapsed,
        )
        print(message.strip())

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        self.checkpoint('finished')
def do_cprofile(func):
    """Decorator that profiles *func* with cProfile and prints the stats.

    Gives call counts and cumulative times per function — a quick,
    dependency-free first look at where time goes. It will not tell you
    *why* a particular function is slow (use do_profile for line-level
    detail), but it is fast enough for routine gut checks before digging
    deeper.

    See also
    --------
    do_profile, test_do_profile
    """
    def profiled_func(*args, **kwargs):
        profiler = cProfile.Profile()
        try:
            # runcall enables the profiler, invokes func, and disables it.
            return profiler.runcall(func, *args, **kwargs)
        finally:
            profiler.print_stats()
    return profiled_func
|
[
"per.andreas.brodtkorb@gmail.com"
] |
per.andreas.brodtkorb@gmail.com
|
d010fb79c796f34db9c3ccef04a23dd8ba9fc29a
|
2a54e8d6ed124c64abb9e075cc5524bb859ba0fa
|
/.history/8-loops_20200406005828.py
|
5e027ff5acfe70abba31bc7f2389a11006536d94
|
[] |
no_license
|
CaptainStorm21/Python-Foundation
|
01b5fbaf7a913506518cf22e0339dd948e65cea1
|
a385adeda74f43dd7fb2d99d326b0be23db25024
|
refs/heads/master
| 2021-05-23T01:29:18.885239
| 2020-04-23T19:18:06
| 2020-04-23T19:18:06
| 253,171,611
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 944
|
py
|
# A for loop is used for iterating over a sequence (that is either a list, a tuple, a dictionary, a set, or a string).
# Simple Loop
# people = ['John', 'Mary', 'Anna', 'Margaret', 'Sylvia']
# for person in people:
# print('Current person is: ', person)
# Break
# people1 = ['John', 'Mary', 'Anna', 'Margaret', 'Sylvia', 'Monique']
# for child in people1:
# if child == 'Anna':
# print('Current child is: ', child)
# break
# gamers = ['John', 'Mary', 'Anna', 'Margaret', 'Sylvia', 'Monique']
# for person in gamers:
# if person == 'Caty':
# continue
# print('Current gamer is: ', person)
# Range
# gamers = ['John', 'Mary', 'Anna', 'Margaret', 'Sylvia', 'Monique']
# for i in range (len(gamers)):
# print('Current gamer: ', gamers[i])
# for i in range (0, 10):
# print ('Number ', i)
# While loops execute a set of statements as long as a condition is true.
count = 0
|
[
"tikana4@yahoo.com"
] |
tikana4@yahoo.com
|
a01a04b5ee23fefda29af42dbf1c20f8e38c55bf
|
e5ade004dcf04b3b0a3598d0496286879f30206e
|
/run_cai_thien.py
|
1a97037e36229da1effe6ee950796eb54610e673
|
[] |
no_license
|
fkatonline/python-daotao
|
ea3bf8e77396547dd8d56fbfb294f54ef77b7bc4
|
91ffebbae719e995a87985c4bb8fe9c0d80e791f
|
refs/heads/master
| 2021-10-12T05:48:16.159908
| 2021-10-08T08:17:23
| 2021-10-08T08:17:23
| 238,109,828
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 461
|
py
|
from lich_thi_cai_thien import LichThi
# i = 1
# for item in data:
# bot.set_lich_tung_sinh_vien(item['mssv'], item['mahp'], item['gd'], item['thoi_gian'])
# print(i, end=' ')
# print(item)
# i = i + 1
# bot.driver.quit()
raw_data = """YY0312
CB0205
YY1907
DD0205
YY0402
CB0405
YY1716
YY1716
YY1009"""
bot = LichThi()
# data = raw_data.split()
# for d in data:
# # bot.set_lich_mhp(d)
# bot.filter_by_mhp(d)
bot.dong_tien(1653010173)
|
[
"fkatonline@gmail.com"
] |
fkatonline@gmail.com
|
cef31d1e6e9cb8cc1e29654e89b9cb2913b3a2e8
|
8c3f9dd66e04288cf745bfd700ba7a27347aa58e
|
/defang/defang.py
|
284a6dd7c0917d22288e1d78239da2f9f11b1927
|
[
"MIT"
] |
permissive
|
ravewillow6383/data-structures-and-algorithms-python
|
df218b1fd70517f27801d52b57a64478b6ea2034
|
98533ee241a3ae452dab1ecb87aab39742005e35
|
refs/heads/master
| 2021-06-17T06:57:41.842210
| 2019-10-03T20:00:12
| 2019-10-03T20:00:12
| 195,877,165
| 0
| 0
|
MIT
| 2021-04-20T18:41:19
| 2019-07-08T19:52:47
|
Python
|
UTF-8
|
Python
| false
| false
| 241
|
py
|
def defang(address):
    """Return *address* with every '.' replaced by '[.]'.

    "Defanging" renders an IP address or URL non-clickable, e.g.
    '1.1.1.1' -> '1[.]1[.]1[.]1'.

    :param address: the IP address string to defang
    :return: the defanged string
    :raises ValueError: if *address* is empty
    """
    # Guard clause first: the original tested len(address) > 0 twice and
    # buried the error path at the bottom.  Behavior is unchanged: empty
    # input raises, everything else is returned defanged.
    if not address:
        raise ValueError('I am sorry, that ip address is empty.')
    return address.replace('.', '[.]')
|
[
"ravewillow6383@gmail.com"
] |
ravewillow6383@gmail.com
|
049871996a5061351366db13bea9998b4ad5707d
|
a17bb41dc850f94f8fdb4dfb135a47d8c79f9d48
|
/DynClmpComposite.py
|
5a1d9ffa4e71e87967bec1e9a53d341d0ab98f64
|
[] |
no_license
|
matthewperkins/plotting
|
93cb2e1df2ad5b0329fc0e7f2ab83f6e26ad1457
|
72132eb519743939955c037dc80ec162287cf772
|
refs/heads/master
| 2021-01-22T20:29:27.925659
| 2014-07-07T20:58:14
| 2014-07-07T20:58:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,512
|
py
|
from traits.api import HasTraits, Instance, Int, Float, Array, on_trait_change, Button
from traitsui.api import View, Group, Item
from enable.api import ColorTrait
from enable.component_editor import ComponentEditor
from chaco.api import marker_trait, Plot, ArrayPlotData, VPlotContainer
from numpy import linspace, sin
import pdb
import numpy as np
import sys
class HHCurrentTraits(HasTraits):
    """Interactive GUI for fitting the sum of two Hodgkin-Huxley-style currents.

    Each current (Cur1, Cur2) is described by up to three steady-state
    Boltzmann gates (M, N, H) -- parameters vhalf, K, ssmin, a denominator
    exponent, and a gate exponent p (p == 0 disables the gate) -- plus a
    maximal conductance Gmax and a reversal potential Erev.  The view shows
    the gate curves of both currents and the individual and combined I-V
    curves, optionally overlaid with experimental data points.
    """
    plots = Instance(VPlotContainer)
    # write_button = Instance(Button)
    write_button = Button()
    # --- current 1: reversal potential, max conductance, gate parameters ---
    ErevCur1 = Float(20)
    GmaxCur1 = Float(0.05)
    MvhalfCur1 = Float(50)
    MKCur1 = Float(-80)
    MssminCur1 = Float(0)
    Mdenom_expCur1 = Float(1)
    MpCur1 = Float(1)
    NvhalfCur1 = Float(25)
    NKCur1 = Float(10)
    NssminCur1 = Float(0)
    Ndenom_expCur1 = Float(1)
    NpCur1 = Float(1)
    HvhalfCur1 = Float(-80)
    HKCur1 = Float(10)
    HssminCur1 = Float(0)
    Hdenom_expCur1 = Float(1)
    HpCur1 = Float(0)
    # --- current 2: same parameter set ---
    ErevCur2 = Float(20)
    GmaxCur2 = Float(0.08)
    MvhalfCur2 = Float(-45)
    MKCur2 = Float(-5)
    MssminCur2 = Float(0)
    Mdenom_expCur2 = Float(1)
    MpCur2 = Float(1)
    NvhalfCur2 = Float(25)
    NKCur2 = Float(10)
    NssminCur2 = Float(0)
    Ndenom_expCur2 = Float(1)
    NpCur2 = Float(1)
    HvhalfCur2 = Float(-80)
    HKCur2 = Float(10)
    HssminCur2 = Float(0)
    Hdenom_expCur2 = Float(1)
    HpCur2 = Float(0)
    # Layout: stacked plots on the left; one parameter column per gate per
    # current on the right, plus the parameter-dump button.
    traits_view = View(
        Group(
            Item('plots', editor=ComponentEditor(), show_label=False),
            Group(
                Group(
                    Group(
                        Item('MvhalfCur1'),
                        Item('MKCur1'),
                        Item('MssminCur1'),
                        Item('Mdenom_expCur1'),
                        Item('MpCur1'),
                        orientation = "vertical"),
                    Group(
                        Item('NvhalfCur1'),
                        Item('NKCur1'),
                        Item('NssminCur1'),
                        Item('Ndenom_expCur1'),
                        Item('NpCur1'),
                        orientation = "vertical"),
                    Group(
                        Item('HvhalfCur1'),
                        Item('HKCur1'),
                        Item('HssminCur1'),
                        Item('Hdenom_expCur1'),
                        Item('HpCur1'),
                        orientation = "vertical"),
                    Group(
                        Item('GmaxCur1'),
                        Item('ErevCur1'),
                        orientation = "vertical")),
                Group(
                    Group(
                        Item('MvhalfCur2'),
                        Item('MKCur2'),
                        Item('MssminCur2'),
                        Item('Mdenom_expCur2'),
                        Item('MpCur2'),
                        orientation = "vertical"),
                    Group(
                        Item('NvhalfCur2'),
                        Item('NKCur2'),
                        Item('NssminCur2'),
                        Item('Ndenom_expCur2'),
                        Item('NpCur2'),
                        orientation = "vertical"),
                    Group(
                        Item('HvhalfCur2'),
                        Item('HKCur2'),
                        Item('HssminCur2'),
                        Item('Hdenom_expCur2'),
                        Item('HpCur2'),
                        orientation = "vertical"),
                    Group(
                        Item('GmaxCur2'),
                        Item('ErevCur2'),
                        orientation = "vertical")),
                Item('write_button'),
                orientation = "horizontal"),
            orientation = "horizontal"))
    def __init__(self, ExprmntVm=None, ExprmntnA=None):
        """Build the plots; optionally overlay experimental (mV, nA) data."""
        super(HHCurrentTraits, self).__init__()
        # gates
        # Voltage grid (mV) over which gates and currents are evaluated.
        self.vm = linspace(-120,65,1000)
        ((MCur1,NCur1,HCur1),(MCur2,NCur2,HCur2)) = self.__gates()
        self.Cur1gatedata = ArrayPlotData(x=self.vm, M=MCur1, N=NCur1, H=HCur1)
        self.Cur2gatedata = ArrayPlotData(x=self.vm, M=MCur2, N=NCur2, H=HCur2)
        Cur1gatesplot = Plot(self.Cur1gatedata)
        Cur1gatesplot.plot(("x", "M"), type = "line", color = "blue")
        Cur1gatesplot.plot(("x", "N"), type = "line", color = "green")
        Cur1gatesplot.plot(("x", "H"), type = "line", color = "red")
        Cur2gatesplot = Plot(self.Cur2gatedata)
        Cur2gatesplot.plot(("x", "M"), type = "line", color = "blue")
        Cur2gatesplot.plot(("x", "N"), type = "line", color = "green")
        Cur2gatesplot.plot(("x", "H"), type = "line", color = "red")
        (Cur1,Cur2) = self.__iv()
        # I-V plot: each current separately plus their sum.
        self.ivdata = ArrayPlotData(x=self.vm, nA1=Cur1, nA2=Cur2, combin=Cur1+Cur2)
        ivplot = Plot(self.ivdata)
        ivplot.plot(("x", "nA1"), type = "line", color = "blue")
        ivplot.plot(("x", "nA2"), type = "line", color = "green")
        ivplot.plot(("x", "combin"), type = "line", color = "black")
        if ExprmntVm is not None:
            # Experimental points drawn as red markers over the model curves.
            self.ivdata.set_data('ExptVm',ExprmntVm)
            self.ivdata.set_data('ExptnA',ExprmntnA)
            ivplot.plot(("ExptVm", "ExptnA"),
                        type = "scatter", color = "red", marker_size = 5)
        self.plots = VPlotContainer(ivplot, Cur2gatesplot, Cur1gatesplot)
        self.plots.spacing = 0
        ivplot.padding_top = 0
        Cur1gatesplot.padding_bottom = 0
        Cur2gatesplot.padding_top = 0
        self.write_button = Button(label="Print_Pars")
    def __gates(self):
        """Return steady-state gate curves ((M1,N1,H1),(M2,N2,H2)) over self.vm.

        Each gate is a Boltzmann function:
        (1 - ssmin) / (1 + exp((vm - vhalf)/K))**denom_exp + ssmin
        """
        MCur1 = (1-self.MssminCur1)/(1 + np.exp((self.vm - self.MvhalfCur1)/self.MKCur1))**self.Mdenom_expCur1 + self.MssminCur1
        NCur1 = (1-self.NssminCur1)/(1 + np.exp((self.vm - self.NvhalfCur1)/self.NKCur1))**self.Ndenom_expCur1 + self.NssminCur1
        HCur1 = (1-self.HssminCur1)/(1 + np.exp((self.vm - self.HvhalfCur1)/self.HKCur1))**self.Hdenom_expCur1 + self.HssminCur1
        MCur2 = (1-self.MssminCur2)/(1 + np.exp((self.vm - self.MvhalfCur2)/self.MKCur2))**self.Mdenom_expCur2 + self.MssminCur2
        NCur2 = (1-self.NssminCur2)/(1 + np.exp((self.vm - self.NvhalfCur2)/self.NKCur2))**self.Ndenom_expCur2 + self.NssminCur2
        HCur2 = (1-self.HssminCur2)/(1 + np.exp((self.vm - self.HvhalfCur2)/self.HKCur2))**self.Hdenom_expCur2 + self.HssminCur2
        # A gate exponent of 0 means "gate absent"; its curve is drawn flat
        # at zero (the exponent also removes it from the current product).
        if self.MpCur1==0:
            MCur1 = np.repeat(0,len(self.vm))
        if self.NpCur1==0:
            NCur1 = np.repeat(0,len(self.vm))
        if self.HpCur1==0:
            HCur1 = np.repeat(0,len(self.vm))
        if self.MpCur2==0:
            MCur2 = np.repeat(0,len(self.vm))
        if self.NpCur2==0:
            NCur2 = np.repeat(0,len(self.vm))
        if self.HpCur2==0:
            HCur2 = np.repeat(0,len(self.vm))
        return ((MCur1,NCur1,HCur1),(MCur2, NCur2, HCur2))
    def __iv(self):
        """Return (Cur1, Cur2) I-V curves: prod(gates**p) * Gmax * (vm - Erev)."""
        ((MCur1,NCur1,HCur1),(MCur2,NCur2,HCur2)) = self.__gates()
        Cur1 = (MCur1**self.MpCur1 * NCur1**self.NpCur1 * HCur1**self.HpCur1)*self.GmaxCur1*(self.vm - self.ErevCur1)
        Cur2 = (MCur2**self.MpCur2 * NCur2**self.NpCur2 * HCur2**self.HpCur2)*self.GmaxCur2*(self.vm - self.ErevCur2)
        return (Cur1,Cur2)
    # '+' matches all traits on the object
    @on_trait_change('+')
    def _calc_current(self):
        # Recompute every curve and push it into the plot data sources so
        # the plots refresh whenever any parameter trait changes.
        ((MCur1,NCur1,HCur1),(MCur2,NCur2,HCur2)) = self.__gates()
        (Cur1, Cur2) = self.__iv()
        comb = Cur1 + Cur2
        self.Cur1gatedata.set_data("M", MCur1)
        self.Cur1gatedata.set_data("N", NCur1)
        self.Cur1gatedata.set_data("H", HCur1)
        self.Cur2gatedata.set_data("M", MCur2)
        self.Cur2gatedata.set_data("N", NCur2)
        self.Cur2gatedata.set_data("H", HCur2)
        self.ivdata.set_data("nA1", Cur1)
        self.ivdata.set_data("nA2", Cur2)
        self.ivdata.set_data("combin", comb)
    def _write_button_fired(self):
        # Dump all trait values to pars.txt.
        # NOTE(review): 'with open(...) as sys.stdout' rebinds sys.stdout to
        # the file and does NOT restore it on exit (the file is then closed),
        # so later prints will fail -- confirm this is acceptable here.
        with open('pars.txt', 'wt') as sys.stdout: self.print_traits()
def main(atf_path):
    ''' pass in the full path of an ATF file with the difference current IV'''
    # atf_reader is a project-local module; imported lazily so this file's
    # classes can be imported without it.
    from atf_reader import ATFreader
    atf = ATFreader(atf_path)
    # Column 0 is membrane potential (mV), column 1 is current (nA).
    # NOTE(review): read_data() is called twice; assumed cheap/idempotent --
    # confirm before collapsing into a single call.
    mV = atf.read_data()[:,0]
    nA = atf.read_data()[:,1]
    HHCurrentTraits(ExprmntVm = mV, ExprmntnA = nA).configure_traits()
if __name__ == "__main__":
    ''' pass in the full path of an ATF file with the difference current IV'''
    # Minimal CLI: requires exactly one argument, the ATF file path.
    import sys
    if len(sys.argv)<2:
        print("need name of axon text file")
        raise ValueError
    main(sys.argv[1])
|
[
"matthewhperkins@gmail.com"
] |
matthewhperkins@gmail.com
|
f14c83c39691d2fcf0613fdc1cd9d2637609162c
|
a0e16209959117cda0ee86b3beddf9ad3f0ea2c6
|
/migrations/versions/88e3f6b8529b_10a.py
|
f0334f801e824056692c43cec4b5bd854b328293
|
[] |
no_license
|
kaishuibaicai/Flask-web-learning
|
fa5e28233e08d0ae2eb04e0d05b2ce6cb10d04d4
|
2b12390f7d117b5e089a02fbb31c5d264c3753ab
|
refs/heads/master
| 2020-12-02T19:42:00.326905
| 2017-08-21T07:27:20
| 2017-08-21T07:27:20
| 96,376,563
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,137
|
py
|
"""10a
Revision ID: 88e3f6b8529b
Revises: 78397128412b
Create Date: 2017-07-15 12:02:55.853313
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '88e3f6b8529b'
down_revision = '78397128412b'
branch_labels = None
depends_on = None
def upgrade():
    """Add the user-profile columns introduced by this revision to 'users'."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('users', sa.Column('about_me', sa.Text(), nullable=True))
    op.add_column('users', sa.Column('last_seen', sa.DateTime(), nullable=True))
    op.add_column('users', sa.Column('location', sa.String(length=64), nullable=True))
    op.add_column('users', sa.Column('member_since', sa.DateTime(), nullable=True))
    op.add_column('users', sa.Column('name', sa.String(length=64), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    """Drop the user-profile columns added by upgrade() (reverse order)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('users', 'name')
    op.drop_column('users', 'member_since')
    op.drop_column('users', 'location')
    op.drop_column('users', 'last_seen')
    op.drop_column('users', 'about_me')
    # ### end Alembic commands ###
|
[
"a272251416@gmail.com"
] |
a272251416@gmail.com
|
cf68ebfab84f0da4056dfcede45a67a68f7f0db2
|
7e39a959e5d37f8ae342a031cbf551d415a331c8
|
/Month1/Xml_sax_parser_attr.py
|
ded5313c5f51e5473e39d3ea8b312f0b1e65a125
|
[] |
no_license
|
ciecmoxia/moxiatest
|
82863a8af9580bf369af9007be0feb9fc6c7a08e
|
f06663a4c85006a5049bf2021d849106ce2f137d
|
refs/heads/main
| 2023-07-31T05:55:31.915694
| 2021-09-28T09:34:17
| 2021-09-28T09:34:17
| 329,915,200
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 805
|
py
|
import xml.sax
class Peoson:
    """Simple record of a person parsed from XML (a name and an age).

    (The class name's spelling is kept as-is: it is part of the public
    interface used by the SAX handler below.)
    """

    def __init__(self, name, age):
        # Attribute values are taken verbatim from the XML tag attributes.
        self.name = name
        self.age = age

    def __str__(self):
        # Human-readable rendering, identical to the original format.
        return f'Person name:{self.name},Person age:{self.age}'
class Myhandler(xml.sax.handler.ContentHandler):
    """SAX content handler that collects Peoson records from <peoson> tags."""

    def __init__(self):
        # One Peoson is appended per matching start tag seen during parsing.
        self.persons = []

    def startElement(self, tag_name, tag_attrs):
        # Only <peoson> elements matter; their 'name' and 'age' attributes
        # become a Peoson record.  Everything else is ignored.
        if tag_name != 'peoson':
            return
        self.persons.append(Peoson(tag_attrs['name'], tag_attrs['age']))
if __name__=="__main__":
#创建xml解析器
parser=xml.sax.make_parser()
#关闭命名空间解析
parser.setFeature(xml.sax.handler.feature_namespaces,0)
#实例化对象
myhandler=Myhandler()
parser.setContentHandler(myhandler)
#解析文档
parser.parse('Xml_practice2.xml')
for i in myhandler.persons:
print(i)
|
[
"ciecmoxia@163.com"
] |
ciecmoxia@163.com
|
0ceaa149f62c4d0ac1618af38585c3570814e82d
|
6aa7e203f278b9d1fd01244e740d5c944cc7c3d3
|
/airflow/providers/apache/kylin/hooks/kylin.py
|
59f6ce94ff23200923bd0942ba05a73279150f5b
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"Python-2.0"
] |
permissive
|
laserpedro/airflow
|
83fc991d91749550b151c81876d9e7864bff3946
|
a28afa8172489e41ecf7c381674a0cb91de850ff
|
refs/heads/master
| 2023-01-02T04:55:34.030935
| 2020-10-24T15:55:11
| 2020-10-24T15:55:11
| 285,867,990
| 1
| 0
|
Apache-2.0
| 2020-08-07T15:56:49
| 2020-08-07T15:56:49
| null |
UTF-8
|
Python
| false
| false
| 2,795
|
py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Optional
from kylinpy import exceptions, kylinpy
from airflow.exceptions import AirflowException
from airflow.hooks.base_hook import BaseHook
class KylinHook(BaseHook):
    """Airflow hook for Apache Kylin, built on the kylinpy client.

    :param kylin_conn_id: The connection id as configured in Airflow administration.
    :type kylin_conn_id: str
    :param project: project name
    :type project: Optional[str]
    :param dsn: dsn
    :type dsn: Optional[str]
    """
    def __init__(self,
                 kylin_conn_id: Optional[str] = 'kylin_default',
                 project: Optional[str] = None,
                 dsn: Optional[str] = None
                 ):
        super().__init__()
        self.kylin_conn_id = kylin_conn_id
        self.project = project
        self.dsn = dsn

    def get_conn(self):
        """Build a kylinpy client from the DSN if given, else from the Airflow connection."""
        conn = self.get_connection(self.kylin_conn_id)
        if self.dsn:
            return kylinpy.create_kylin(self.dsn)
        else:
            # Fall back to the connection's schema when no project was given.
            self.project = self.project if self.project else conn.schema
            return kylinpy.Kylin(conn.host, username=conn.login,
                                 password=conn.password, port=conn.port,
                                 project=self.project, **conn.extra_dejson)

    def cube_run(self, datasource_name, op, **op_args):
        """
        Run a CubeSource command which is in CubeSource.support_invoke_command.

        :param datasource_name: name of the cube datasource to operate on
        :param op: command
        :param op_args: command args
        :return: response
        :raises AirflowException: if the Kylin client reports an error
        """
        cube_source = self.get_conn().get_datasource(datasource_name)
        try:
            response = cube_source.invoke_command(op, **op_args)
            return response
        except exceptions.KylinError as err:
            raise AirflowException("Cube operation {} error , Message: {}".format(op, err))

    def get_job_status(self, job_id):
        """
        Get job status.

        :param job_id: kylin job id
        :return: job status
        """
        return self.get_conn().get_job(job_id).status
|
[
"noreply@github.com"
] |
noreply@github.com
|
e0ba1b56a9d843c025074f4653b09ddf37e0ad77
|
65d58f994c4c4e94ec71fd371302180c92989daf
|
/script1.py
|
3a4cf1f4703f34faea9bef094bf0e87c5d9b9d65
|
[] |
no_license
|
MattDawson2020/Python-Flaskwebapp
|
16b117266d6f74be9e864f1b8d66c9e496d8aae6
|
4bf1adf8a68d3d452bd43c468dff7ba980e64403
|
refs/heads/main
| 2023-06-22T16:31:54.292594
| 2021-07-27T15:09:45
| 2021-07-27T15:09:45
| 388,769,622
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 574
|
py
|
from flask import Flask, render_template
#import flask class object from library
app = Flask(__name__)
# initilize flask object, __name__ assigns the name main to the app on this page, if imported it is assigned the name script1.py
@app.route('/')
# Register the function below as the handler for the root URL.
def home():
    # Render templates/home.html for requests to /.
    return render_template("home.html")
# Flask links a URL rule to the function defined immediately below the
# @app.route decorator.
@app.route('/about')
def about():
    # Render templates/about.html for requests to /about.
    return render_template("about.html")
if __name__ == "__main__":
app.run(debug=True)
|
[
"mattdawson18@gmail.com"
] |
mattdawson18@gmail.com
|
2b56f5546d3e842e5db09a8df54c167b3dc36547
|
87591443dfb54390a1052a0c149abab23b7f414d
|
/CH00_补充随笔/4-2-4.损失函数.py
|
4311a3b129aaf35d286bd2803c91701f336f089f
|
[] |
no_license
|
by777/fluentPython
|
1c12b4e27cd813dab68c9999741e956489c81bb7
|
38bced932755d53ab280e15fb083fffa13704e36
|
refs/heads/main
| 2023-04-04T01:59:39.255652
| 2021-04-16T06:29:50
| 2021-04-16T06:29:50
| 351,430,308
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 412
|
py
|
# -*- coding: utf-8 -*-
# @TIME : 2021/3/25 19:11
# @AUTHOR : Xu Bai
# @FILE : 4-2-4.损失函数.py  (loss functions)
# @DESCRIPTION : minimal CrossEntropyLoss demo
import torch as t
from torch.autograd import Variable as V
# Random class scores for a batch of 3 samples over 3 classes.
# (NOTE(review): the original comment said "2 classes", but the tensor is 3x3.)
score = V(t.randn(3, 3))
# The 3 samples belong to classes 1, 0 and 1; the target must be a LongTensor.
label = V(t.Tensor([1, 0, 1])).long()
criterion = t.nn.CrossEntropyLoss()
loss = criterion(score, label)
print(loss)
|
[
"1373953675@qq.com"
] |
1373953675@qq.com
|
dd9555e254f4da9a4f12b16a39b2913b8b128302
|
e54d61d6db1e61cf6caa414b6b7fdfb6fb17657a
|
/9-Flask知识点/笔记涉及项目代码/day03Tem/App/models.py
|
e8e839d7c4ad56b2891354c1f7bbc7a4bb2968d6
|
[] |
no_license
|
chen12356/Linux-mysql-orm-tornado
|
6af83c317208cc9f58fab9e261dfe4af5197dc34
|
353a2f02c65099b1cdc2146c647cc63b8979fe15
|
refs/heads/master
| 2020-07-27T00:33:40.518997
| 2019-09-24T14:29:43
| 2019-09-24T14:29:43
| 208,810,381
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 285
|
py
|
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
class Info(db.Model):
    # ORM model for one employee row in table 'emp_info'.
    id = db.Column(db.Integer,primary_key=True,autoincrement=True)  # surrogate key
    name = db.Column(db.String(20))
    sex = db.Column(db.String(8))
    age = db.Column(db.Integer)
    # Explicit table name (would otherwise be derived from the class name).
    __tablename__ = 'emp_info'
|
[
"1406159466@qq.com"
] |
1406159466@qq.com
|
6d9189588d108e777f2567c9c3d9933cdcbceb17
|
5c3f1027aaf6265e09eebf473a688ff449e9c078
|
/Main.py
|
345044070dea01f0cda2a79a0588aebb2b801dce
|
[] |
no_license
|
vedant-shukla/Instagram-User-Profiling
|
f8c6e8263c71ae9e84bb4e94dea75b0dcb0a520e
|
8dc024ef61c88bc5bc8aee6f61c87b162075c73c
|
refs/heads/master
| 2020-03-25T12:53:04.632400
| 2018-08-07T00:54:08
| 2018-08-07T00:54:08
| 143,798,854
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,449
|
py
|
import six
import copy
import re
import json
import bs4 as bs
from Login import *
# from ImageWorker import ImageWorker
PARSER = 'html.parser'
meta_data = {}
medias_queue = six.moves.queue.Queue()
SHARED_DATA = re.compile(r'window._sharedData = ({[^\n]*});')
TEMPLATE = re.compile(r'{([a-zA-Z]*)}')
CODE_URL = re.compile(r'p/([^/]*)')
login = Login("dishankmehta", "Unnamedthe@22")
login.login_user()
print("Input the username you wish to scrap: ")
profile = str(input("Enter Username: "))
profile_parameters = {
'target': profile,
'page_name': 'ProfilePage',
'section_name': 'user',
'base_url': "https://www.instagram.com/{}/"
}
def fetch_media_and_download(media_count):
seen = set()
for page in fetch_pages(media_count):
for media in \
page['entry_data'][profile_parameters['page_name']][0][profile_parameters['section_name']]['media'][
'nodes']:
if media['id'] in seen:
return
yield media
seen.add(media['id'])
if not media['is_video']:
initialize_workers()
medias_queued = fill_media_queue(media_count, False)
print(medias_queued)
def initialize_workers():
workers = []
medias_queue = six.moves.queue.Queue()
for _ in six.moves.range(16):
worker = ImageWorker()
worker.start()
workers.append(worker)
def fetch_pages(media_count=None):
url = profile_parameters['base_url'].format(profile_parameters['target'])
page_count = 0
while True:
page_count += 1
res = login.session.get(url)
data = fetch_shared_data(res)
try:
media_info = data['entry_data'][profile_parameters['page_name']][0] \
[profile_parameters['section_name']]['media']
except KeyError:
print("Could not find page of user: {}".format(profile_parameters['target']))
return
if media_count is None:
media_count = data['entry_data'][profile_parameters['page_name']][0] \
[profile_parameters['section_name']]['media']['count']
if 'max_id' not in url and profile_parameters['section_name'] == 'user':
meta_data = parse_metadata_from_page(data)
print(data)
yield data
if not media_info['page_info']['has_next_page'] or not media_info['nodes']:
if not media_info['nodes']:
if login.is_logged_in():
msg = 'Profile {} is private, retry after logging in.'.format(profile_parameters['target'])
else:
msg = 'Profile {} is private, and you are not following it'.format(profile_parameters['target'])
print(msg)
break
else:
url = '{}?max_id={}'.format(profile_parameters['base_url'].format(profile_parameters['target']),
media_info['page_info']['end_cursor'])
def fetch_shared_data(res):
soup = bs.BeautifulSoup(res.text, PARSER)
script = soup.find('body').find('script', {'type': 'text/javascript'})
return json.loads(SHARED_DATA.match(script.text).group(1))
def parse_metadata_from_page(data):
user = data["entry_data"][profile_parameters['page_name']][0]["user"]
metadata = {}
for k, v in six.iteritems(user):
metadata[k] = copy.copy(v)
metadata['follows'] = metadata['follows']['count']
metadata['followed_by'] = metadata['followed_by']['count']
del metadata['media']['nodes']
return metadata
def fill_media_queue(media_count, new_only=False):
medias_queued = 0
for media in fetch_media_and_download(media_count):
medias_queued, stop = add_media_to_queue(media, media_count, medias_queued, new_only)
if stop:
break
return medias_queued
def add_media_to_queue(media, media_count, medias_queued, new_only):
media = get_post_info(media.get('shortcode') or media['code'])
medias_queued += 1
medias_queue.put(media)
return medias_queued, True
def get_post_info(id):
    """Fetch the media dict for a single Instagram post.

    :param id: the post shortcode (the token after /p/ in the post URL)
    :return: the post's media dict, with the legacy 'code' and
        'display_src' keys backfilled from their modern equivalents
    """
    url = "https://www.instagram.com/p/{}/".format(id)
    res = login.session.get(url)
    # Fix: the shared-data path is 'graphql' / 'shortcode_media'; the
    # original 'graphq1' / 'shortcode media' keys do not exist in the
    # payload and raised KeyError on every call.
    media = fetch_shared_data(res)['entry_data']['PostPage'][0] \
        ['graphql']['shortcode_media']
    # Backfill legacy key names expected by older consumers
    # (original 'desplay_src' was a typo for 'display_src').
    media.setdefault('code', media.get('shortcode'))
    media.setdefault('display_src', media.get('display_url'))
    return media
|
[
"13bce113@nirmauni.ac.in"
] |
13bce113@nirmauni.ac.in
|
204ac2fd90638f66972c04d9ba39b0b16d53f4f6
|
024594e43b96314c48b01dfeb1c2d3c38a9a069d
|
/chapter7/detect_hog_svm.py
|
f955882ec275aa75fb35560f495ec62491fdd48d
|
[] |
no_license
|
PLLLLLLL/OpenCV3-Python
|
8a7a246246ddc25fa8ae127de764fa6574e145c2
|
a916a83c3efe6c24be8ba4b7b1a59498c2d06e9b
|
refs/heads/master
| 2020-04-12T20:14:01.129354
| 2019-03-29T07:35:37
| 2019-03-29T07:35:37
| 162,729,999
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,925
|
py
|
import cv2
import numpy as np
from os.path import join
datapath = "/home/d3athmast3r/dev/python/CarData/TrainImages/"
def path(cls, i):
    """Build the file name of training image *i* for class prefix *cls*.

    Image files on disk are 1-indexed (e.g. pos-1.pgm), hence i + 1.
    """
    return "%s/%s%d.pgm" % (datapath, cls, i + 1)
pos, neg = "pos-", "neg-"
detect = cv2.xfeatures2d.SIFT_create()
extract = cv2.xfeatures2d.SIFT_create()
flann_params = dict(algorithm = 1, trees = 5)
matcher = cv2.FlannBasedMatcher(flann_params, {})
bow_kmeans_trainer = cv2.BOWKMeansTrainer(40)
extract_bow = cv2.BOWImgDescriptorExtractor(extract, matcher)
def extract_sift(fn):
im = cv2.imread(fn,0)
return extract.compute(im, detect.detect(im))[1]
for i in range(8):
bow_kmeans_trainer.add(extract_sift(path(pos,i)))
bow_kmeans_trainer.add(extract_sift(path(neg,i)))
voc = bow_kmeans_trainer.cluster()
extract_bow.setVocabulary( voc )
def bow_features(fn):
im = cv2.imread(fn,0)
return extract_bow.compute(im, detect.detect(im))
traindata, trainlabels = [],[]
for i in range(20):
traindata.extend(bow_features(path(pos, i))); trainlabels.append(1)
traindata.extend(bow_features(path(neg, i))); trainlabels.append(-1)
svm = cv2.ml.SVM_create()
svm.train(np.array(traindata), cv2.ml.ROW_SAMPLE, np.array(trainlabels))
def predict(fn):
f = bow_features(fn)
p = svm.predict(f)
print (fn, "\t", p[1][0][0])
return p
car, notcar = "/home/d3athmast3r/dev/python/study/images/car.jpg", "/home/d3athmast3r/dev/python/study/images/bb.jpg"
car_img = cv2.imread(car)
notcar_img = cv2.imread(notcar)
car_predict = predict(car)
not_car_predict = predict(notcar)
font = cv2.FONT_HERSHEY_SIMPLEX
if (car_predict[1][0][0] == 1.0):
cv2.putText(car_img,'Car Detected',(10,30), font, 1,(0,255,0),2,cv2.LINE_AA)
if (not_car_predict[1][0][0] == -1.0):
cv2.putText(notcar_img,'Car Not Detected',(10,30), font, 1,(0,0, 255),2,cv2.LINE_AA)
cv2.imshow('BOW + SVM Success', car_img)
cv2.imshow('BOW + SVM Failure', notcar_img)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
[
"963029512@qq.com"
] |
963029512@qq.com
|
c2f331959148c7f2188f9d7d23486a0a3939935a
|
c763ed96250b1b7168625a279e5409efec888cc7
|
/backup_Dispersion.py
|
e300901e3ba1824bdee4e844a612c8cc6d51cb33
|
[] |
no_license
|
rumbaugh/pythonscripts
|
1e63167c5ef831b4287fe080256e6bc1f9caa923
|
58b1d480371a75e197fedd8c45e0af91b3528d98
|
refs/heads/master
| 2020-12-24T12:00:46.203202
| 2017-06-26T20:26:21
| 2017-06-26T20:26:21
| 73,103,188
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,510
|
py
|
#these functions calculate the dispersions as defined in Pelt et al. (1996,1998)
#and used in Fassnacht et al. 1999
import time
import numpy as np
import sys
def D_2_old(A, B, A_t, B_t, A_err, B_err, mu, tau):
    """Reference (loop-based) Pelt et al. D^2 dispersion.

    Curve B is scaled by mu and shifted by tau, the two curves are merged
    and time-sorted, and every adjacent pair that straddles the two curves
    contributes its squared flux difference, weighted by the inverse
    summed variance of the pair.  Note that, unlike D_2 below, B's errors
    are also scaled by mu here.
    """
    group = np.append(np.zeros(len(A), dtype='int'), np.ones(len(B), dtype='int'))
    flux = np.append(A, B * mu)
    err = np.append(A_err, mu * B_err)
    order = np.argsort(np.append(A_t, B_t + tau))
    weight_sum = 0.0
    weighted_sq = 0.0
    # Walk adjacent pairs in time order.
    for j, k in zip(order[:-1], order[1:]):
        # Only pairs with one point from each curve contribute.
        if group[j] + group[k] != 1:
            continue
        w = 1.0 / (err[j] ** 2 + err[k] ** 2)
        weight_sum += w
        weighted_sq += w * (flux[k] - flux[j]) ** 2
    return weighted_sq / weight_sum
def D_2(A, B, A_t, B_t, A_err, B_err, mu, tau):
    """Pelt et al. D^2 dispersion between curve A and curve B, with B
    scaled by mu and shifted in time by tau.

    The merged, time-sorted sequence is scanned for adjacent pairs that
    straddle the two curves; the weighted mean of their squared flux
    differences (inverse summed-variance weights) is returned.
    """
    group = np.append(np.zeros(len(A), dtype='int'), np.ones(len(B), dtype='int'))
    flux = np.append(A, B * mu)
    err = np.append(A_err, B_err)
    order = np.argsort(np.append(A_t, B_t + tau))
    g = group[order]
    f = flux[order]
    e = err[order]
    # Indices k where points k and k+1 come from different curves.
    cross = np.where(g[:-1] != g[1:])[0]
    top = 0.0
    bottom = 0.0
    for k in cross:
        w = 1.0 / (e[k] ** 2 + e[k + 1] ** 2)
        bottom += w
        top += w * (f[k] - f[k + 1]) ** 2
    return top / bottom
#The difference between D_2 and D_2b is that in D_2 the B flux is multiplied
#by mu while in D_2b the A flux is divided by mu. There is a similar
#difference between D_4_2 and D_4_2b
def D_2b(A, B, A_t, B_t, A_err, B_err, mu, tau):
    """Variant of D_2 that divides the A flux by mu instead of multiplying
    the B flux by mu (see the module comment above); errors are unscaled.

    Same adjacent cross-curve pair scheme as D_2: inverse-variance
    weighted mean of squared flux differences over the merged,
    time-sorted sequence.
    """
    group = np.append(np.zeros(len(A), dtype='int'), np.ones(len(B), dtype='int'))
    flux = np.append(A / mu, B)
    err = np.append(A_err, B_err)
    order = np.argsort(np.append(A_t, B_t + tau))
    g_sorted = group[order]
    f_sorted = flux[order]
    e_sorted = err[order]
    numer = 0.0
    denom = 0.0
    # k runs over adjacent positions whose two points straddle the curves.
    for k in np.where(g_sorted[:-1] != g_sorted[1:])[0]:
        w = 1.0 / (e_sorted[k] ** 2 + e_sorted[k + 1] ** 2)
        denom += w
        numer += w * (f_sorted[k] - f_sorted[k + 1]) ** 2
    return numer / denom
def D_4_2_old(A,B,A_t,B_t,A_err,B_err,mu,tau,delta):
    # Original O(n^2) double-loop implementation of the D^2_{4,delta}
    # dispersion (all cross-curve pairs within |dt| <= delta, linearly
    # down-weighted by separation); superseded by D_4_2 below.
    G_ref = np.append(np.zeros(len(A),dtype='int'),np.ones(len(B),dtype='int'))
    comb_flux = np.append(A,B*mu)
    comb_err = np.append(A_err,B_err)
    comb_t = np.append(A_t,B_t+tau)
    comb_t_argsort = np.argsort(comb_t)
    sum_bottom,sum_top = 0.0,0.0
    for iwg in range(0,len(comb_t)-1):
        for iwg2 in range(iwg+1,len(comb_t)):
            # Pair must straddle the two curves and lie within +/- delta.
            if ((G_ref[comb_t_argsort[iwg]] + G_ref[comb_t_argsort[iwg2]] == 1) & (np.fabs(comb_t[comb_t_argsort[iwg]] - comb_t[comb_t_argsort[iwg2]]) <= delta)):
                errt = 1.0/(comb_err[comb_t_argsort[iwg]]**2+comb_err[comb_t_argsort[iwg2]]**2)
                # NOTE(review): the taper in sum_bottom uses index iwg+1
                # while sum_top uses the pair index iwg2 -- inconsistent
                # weighting that D_4_2 below applies consistently.  Kept
                # as-is since this is the retained "_old" reference.
                sum_bottom += (1-np.fabs(comb_t[comb_t_argsort[iwg]] - comb_t[comb_t_argsort[iwg+1]])/delta)*errt
                sum_top += (1-np.fabs(comb_t[comb_t_argsort[iwg]] - comb_t[comb_t_argsort[iwg2]])/delta)*(comb_flux[comb_t_argsort[iwg2]]-comb_flux[comb_t_argsort[iwg]])**2*errt
    # Sentinels: 999999999. when no pairs contributed; 999999. when the
    # weighted dispersion is exactly zero.
    if ((sum_bottom == 0) | (sum_top == 0)):
        return 999999999.
    else:
        if sum_top*1.0/sum_bottom == 0.0:
            return 999999.
        else:
            return sum_top*1.0/sum_bottom
def D_4_2(A,B,A_t,B_t,A_err,B_err,mu,tau,delta):
    # D^2_{4,delta} dispersion (Pelt et al.): like D_2 but averaging over
    # ALL cross-curve pairs within |dt| <= delta, linearly down-weighted
    # by their time separation.  B is scaled by mu and shifted by tau.
    G_ref = np.append(np.zeros(len(A),dtype='int'),np.ones(len(B),dtype='int'))
    comb_flux = np.append(A,B*mu)
    comb_err = np.append(A_err,B_err)
    comb_t = np.append(A_t,B_t+tau)
    comb_t_argsort = np.argsort(comb_t)
    sum_bottom,sum_top = 0.0,0.0
    for iwg in range(0,len(comb_t)-1):
        # Candidate partners: all later (sorted) points within +/- delta.
        giwg = np.where((comb_t[comb_t_argsort[iwg]] - comb_t[comb_t_argsort[iwg+1:]] <= delta) & (comb_t[comb_t_argsort[iwg]] - comb_t[comb_t_argsort[iwg+1:]] >= -1*delta))
        giwg = giwg[0]
        for iwg2 in range(0,len(giwg)):
            # Keep only pairs with one point from each curve.
            if ((G_ref[comb_t_argsort[iwg]] + G_ref[comb_t_argsort[iwg+1+giwg[iwg2]]] == 1)):
                # Inverse-variance weight times a triangular taper in |dt|/delta.
                errt = 1.0/(comb_err[comb_t_argsort[iwg]]**2+comb_err[comb_t_argsort[iwg+1+giwg[iwg2]]]**2)
                fabst = (1-np.fabs(comb_t[comb_t_argsort[iwg]] - comb_t[comb_t_argsort[iwg+1+giwg[iwg2]]])/delta)
                sum_bottom += fabst*errt
                sum_top += fabst*(comb_flux[comb_t_argsort[iwg+1+giwg[iwg2]]]-comb_flux[comb_t_argsort[iwg]])**2*errt
    # Sentinels: 999999999. when no pairs contributed; 999999. when the
    # weighted dispersion is exactly zero.
    if ((sum_bottom == 0) | (sum_top == 0)):
        return 999999999.
    else:
        if sum_top*1.0/sum_bottom == 0.0:
            return 999999.
        else:
            return sum_top*1.0/sum_bottom
def D_4_2b(A,B,A_t,B_t,A_err,B_err,mu,tau,delta):
    # Variant of D_4_2 that divides A by mu instead of multiplying B by mu,
    # with the inner pair loop vectorized over numpy arrays.
    G_ref = np.append(np.zeros(len(A),dtype='int'),np.ones(len(B),dtype='int'))
    comb_flux = np.append(A/mu,B)
    comb_err = np.append(A_err,B_err)
    comb_t = np.append(A_t,B_t+tau)
    comb_t_argsort = np.argsort(comb_t)
    sum_bottom,sum_top = 0.0,0.0
    for iwg in range(0,len(comb_t)-1):
        # Partners for point iwg: later (sorted) points from the OTHER
        # curve within +/- delta in time (both tests folded into the mask).
        giwg = np.where((G_ref[comb_t_argsort[iwg]] + G_ref[comb_t_argsort[iwg+1:]] == 1) & (np.fabs(comb_t[comb_t_argsort[iwg]] - comb_t[comb_t_argsort[iwg+1:]]) <= delta))[0]
        #for iwg2 in range(0,len(giwg)):
        #    errt = 1.0/(comb_err[comb_t_argsort[iwg]]**2+comb_err[comb_t_argsort[iwg+1+giwg[iwg2]]]**2)
        #    fabst = (1-np.fabs(comb_t[comb_t_argsort[iwg]] - comb_t[comb_t_argsort[iwg+1+giwg[iwg2]]])/delta)
        #    sum_bottom += fabst*errt
        #    sum_top += fabst*(comb_flux[comb_t_argsort[iwg+1+giwg[iwg2]]]-comb_flux[comb_t_argsort[iwg]])**2*errt
        # Vectorized form of the commented-out loop kept above for reference.
        errt = 1.0/(comb_err[comb_t_argsort[iwg]]**2+comb_err[comb_t_argsort[iwg+1+giwg]]**2)
        fabst = (1-np.fabs(comb_t[comb_t_argsort[iwg]] - comb_t[comb_t_argsort[iwg+1+giwg]])/delta)
        sum_bottom += (fabst*errt).sum()
        sum_top += (fabst*(comb_flux[comb_t_argsort[iwg+1+giwg]]-comb_flux[comb_t_argsort[iwg]])**2*errt).sum()
    # Sentinels: 999999999. when no pairs contributed; 999999. when the
    # weighted dispersion is exactly zero.
    if ((sum_bottom == 0) | (sum_top == 0)):
        return 999999999.
    else:
        if sum_top*1.0/sum_bottom == 0.0:
            return 999999.
        else:
            return sum_top*1.0/sum_bottom
def calc_disp_delay(A,B,A_t,B_t,A_err,B_err,maxtime,timestep,minmu,maxmu,mustep,disp_type,delta=3.5,output=1,print_times=False,disparray=False,dispmatrix=False,mintime=None,inner50=True,simplemuerr=False,ALAG=31.5,use_overlap_mean=False,outfile=None,verbose=True):
    """Grid search of the dispersion statistic over time delay tau and flux ratio mu.

    Scans tau from -maxtime (or `mintime`) to +maxtime in steps of `timestep`,
    and mu from minmu*mu0 to maxmu*mu0 in steps of mustep*mu0, where mu0 is a
    first-guess flux ratio of the two curves.  `disp_type` selects which
    dispersion function is evaluated ('D_2', 'D_2b', 'D_4_2' or 'D_4_2b').
    NOTE(review): this is Python-2 code (print statements).
    """
    #calculates the dispersion on a grid of tau (time delay) and mu values
    #output = 1 returns just the minimum delay and mu
    #output = 2 also returns the minimum dispersion
    #output = 3 also returns dispersion matrix
    #output = 4 also returns mu0
    #if disparray=True, an array
    #if outfile is set, dispersion values for each mu,tau pair are written
    #to a text file
    maxtimestep=maxtime
    if outfile != None:
        FILE = open(outfile,'w')
        FILE.write('#tau mu disp\n')
    B_err_t = B_err.copy()
    disparrout,disparrmu,disparrtime = np.zeros(0),np.zeros(0),np.zeros(0)
    start_t = time.time()
    mindisp,mintau,minmu_out = -99,0.,0.
    # --- first-guess flux ratio mu0 and its uncertainty ---
    if use_overlap_mean:
        # use only the region where the two curves overlap once B is shifted by ALAG
        galag,gblag = np.where((A_t > np.min(B_t)+ALAG) & (A_t < np.max(B_t) + ALAG)),np.where((B_t + ALAG > np.min(A_t)) & (B_t + ALAG < np.max(A_t)))
        galag,gblag = galag[0],gblag[0]
        mu0 = np.mean(A[galag])/np.mean(B[gblag])
        mu0err = mu0*np.sqrt(np.sum(A_err[galag]*A_err[galag])/np.mean(A[galag])/np.mean(A[galag])/((len(galag))**2)+np.sum(B_err[gblag]*B_err[gblag])/np.mean(B[gblag])/np.mean(B[gblag])/((len(gblag))**2))
    elif inner50:
        # use the inner 50% of the samples (quartile 1 to quartile 3) of both curves
        mu0 = np.mean(A[np.floor(len(A)/4)-1:np.ceil(3*len(A)/4)])/np.mean(B[np.floor(len(A)/4)-1:np.ceil(3*len(A)/4)])
        mu0err = mu0*np.sqrt(np.sum((A_err[np.floor(len(A)/4)-1:np.ceil(3*len(A)/4)])**2)/((np.mean(A[np.floor(len(A)/4)-1:np.ceil(3*len(A)/4)]))**2)+np.sum((B_err[np.floor(len(A)/4)-1:np.ceil(3*len(A)/4)])**2)/((np.mean(B[np.floor(len(A)/4)-1:np.ceil(3*len(A)/4)]))**2))/(np.ceil(3*len(A)/4)-np.floor(len(A)/4))
    else:
        mu0 = np.mean(A)*1.0/np.mean(B)
        mu0err = mu0*np.sqrt(np.sum(A_err*A_err)/np.mean(A)/np.mean(A)+np.sum(B_err*B_err)/np.mean(B)/np.mean(B))/len(A)
    if ((disp_type != 'D_2b') & (disp_type != 'D_2') & (disp_type != 'D_4_2b') & (disp_type != 'D_4_2')): sys.exit("disp_type must be either 'D_2' pr 'D_4_2' for calc_disp_delay")
    tau = -1.0*maxtimestep
    #if ((mintime != None) & (mintime > tau) & (mintime < -1.0*tau)): tau = mintime
    if ((mintime != None)):
        tau = mintime
    basetime = tau
    st2 = time.time()
    # --- outer loop over trial time delays ---
    while tau <= maxtimestep:
        #Figuring out where the overlap between A and shifted B curves are
        #Then, finds the flux ratio between them
        #if tau > 0:
        #    gmu0A,gmu0B = np.where(A_t >= tau+A_t.min()),np.where(B_t <= B_t.max()-tau)
        #    gmu0A,gmu0B = gmu0A[0],gmu0B[0]
        #    Acmu0,Bcmu0 = A[gmu0A],B[gmu0B]
        #    Acmu0err,Bcmu0err = A_err[gmu0A],B_err[gmu0B]
        #elif tau < 0:
        #    gmu0B,gmu0A = np.where(B_t >= tau+B_t.min()),np.where(A_t <= A_t.max()-tau)
        #    gmu0A,gmu0B = gmu0A[0],gmu0B[0]
        #    Acmu0,Bcmu0 = A[gmu0A],B[gmu0B]
        #    Acmu0err,Bcmu0err = A_err[gmu0A],B_err[gmu0B]
        #else:
        #    Acmu0,Bcmu0 = A,B
        #    Acmu0err,Bcmu0err = A_err,B_err
        #mu0 = np.mean(Acmu0)*1.0/np.mean(Bcmu0)
        #mu0err = mu0*np.sqrt(np.sum(Acmu0err*Acmu0err)/np.mean(Acmu0)/np.mean(Acmu0)+np.sum(Bcmu0err*Bcmu0err)/np.mean(Bcmu0)/np.mean(Bcmu0))/len(Acmu0)
        mu = minmu*mu0
        muerr = minmu*mu0err
        mindisp2,minmu2 = -99,0.
        disparrtmp = np.zeros(0)
        # --- inner loop over trial flux ratios at this tau ---
        while mu <= maxmu*mu0:
            if simplemuerr:
                # simple propagation: scale one curve's errors by mu
                if ((disp_type == 'D_2b') | (disp_type == 'D_4_2b')):
                    A_err_t = A_err/mu
                    B_err_t = B_err
                else:
                    A_err_t = A_err
                    B_err_t = B_err*mu
            else:
                # full propagation including the uncertainty on mu itself
                for ibet in range(0,len(B_err)): B_err_t[ibet] = np.sqrt(muerr**2/mu/mu+(B_err[ibet])**2/B[ibet]/B[ibet])*B[ibet]*mu
            if disp_type == 'D_2b':
                if not simplemuerr: sys.exit('D_2b option only works with simplemuerr')
                D_tmp = D_2b(A,B,A_t,B_t,A_err_t,B_err,mu,tau)
            if disp_type == 'D_4_2b':
                if not simplemuerr: sys.exit('D_4_2b option only works with simplemuerr')
                D_tmp = D_4_2b(A,B,A_t,B_t,A_err_t,B_err,mu,tau,delta)
            if disp_type == 'D_2': D_tmp = D_2(A,B,A_t,B_t,A_err,B_err_t,mu,tau)
            if disp_type == 'D_4_2': D_tmp = D_4_2(A,B,A_t,B_t,A_err,B_err_t,mu,tau,delta)
            # track the global minimum and the per-tau minimum (-99 = unset)
            if ((D_tmp < mindisp) | (mindisp == -99)): mindisp,mintau,minmu_out = D_tmp,tau,mu
            if ((D_tmp < mindisp2) | (mindisp2 == -99)): mindisp2,minmu2 = D_tmp,mu
            disparrtmp = np.append(disparrtmp,D_tmp)
            if outfile != None:
                #print D_tmp
                FILE.write('%f %f %E\n'%(tau,mu,D_tmp))
            mu += mustep*mu0
            muerr += mustep*mu0err
        # first row starts the matrix; later rows are appended
        if (((tau == -1*maxtimestep) & (mintime == None)) | (tau == mintime)):
            dispmatrixout = np.array([disparrtmp])
        else:
            dispmatrixout = np.append(dispmatrixout,np.array([disparrtmp]),axis=0)
        disparrout,disparrmu,disparrtime = np.append(disparrout,mindisp2),np.append(disparrmu,minmu2),np.append(disparrtime,tau)
        if verbose:
            if tau < (basetime + 0.5*timestep):
                t_mu_1 = time.time()
                print "Initial ETA: %i seconds"%(int((maxtimestep-basetime)/timestep*(t_mu_1-st2)-(t_mu_1-st2)))
        tau += timestep
        if verbose:
            # progress reports at 25/50/75% of the tau grid
            if ((tau >= (basetime + 0.25*(maxtimestep-basetime))) & (tau < (basetime + 0.25*(maxtimestep-basetime) + timestep))):
                t_25 = time.time()
                print "25%% Done - ETA: %i seconds"%(int(3*(t_25-start_t)))
            if ((tau >= (basetime + 0.50*(maxtimestep-basetime))) & (tau < (basetime + 0.50*(maxtimestep-basetime) + timestep))):
                t_50 = time.time()
                print "50%% Done - ETA: %i seconds"%(int((t_50-start_t)))
            if ((tau >= (basetime + 0.75*(maxtimestep-basetime))) & (tau < (basetime + 0.75*(maxtimestep-basetime) + timestep))):
                t_75 = time.time()
                print "75%% Done - ETA: %i seconds"%(int((t_75-start_t)/3.0))
    if verbose:
        if time.time()-start_t<2: print 'Total time elapsed: %f seconds'%(time.time()-start_t)
        else: print 'Total time elapsed: %i seconds'%(time.time()-start_t)
    if outfile != None: FILE.close()
    if dispmatrix:
        if output == 3:
            return dispmatrixout,mintau,minmu_out,mindisp
        elif output == 4:
            return dispmatrixout,mintau,minmu_out,mindisp,mu0
        else:
            return dispmatrixout
    elif disparray:
        return disparrout,disparrmu,disparrtime
    elif output == 1:
        return mintau,minmu_out
    else:
        return mintau,minmu_out,mindisp
def calc_disp_delay_test(A,B,A_t,B_t,A_err,B_err,maxtime,timestep,minmu,maxmu,mustep,disp_type,delta=3.5,output=1,print_times=False,disparray=False,dispmatrix=False,mintime=None,inner50=True,simplemuerr=False):
#calculates the dispersion on a grid of tau (time delay) and mu values
#output = 1 returns just the minimum delay and mu
#output = 2 also returns the minimum dispersion
#if disparray=True, an array
B_err_t = B_err.copy()
n_timepts,n_mupts = 2*maxtime/timestep+1,(maxmu-minmu)/mustep+1
disparrout,disparrmu,disparrtime = np.zeros(n_timepts),np.zeros(n_timepts),np.zeros(n_timepts)
dispmatrixout = np.zeros((n_timepts,n_mupts))
start_t = time.time()
mindisp,mintau,minmu_out = -99,0.,0.
if inner50:
mu0 = np.mean(A[np.floor(len(A)/4)-1:np.ceil(3*len(A)/4)])/np.mean(B[np.floor(len(A)/4)-1:np.ceil(3*len(A)/4)])
mu0err = mu0*np.sqrt(np.sum((A_err[np.floor(len(A)/4)-1:np.ceil(3*len(A)/4)])**2)/((np.mean(A[np.floor(len(A)/4)-1:np.ceil(3*len(A)/4)]))**2)+np.sum((B_err[np.floor(len(A)/4)-1:np.ceil(3*len(A)/4)])**2)/((np.mean(B[np.floor(len(A)/4)-1:np.ceil(3*len(A)/4)]))**2))/(np.ceil(3*len(A)/4)-np.floor(len(A)/4))
else:
mu0 = np.mean(A)*1.0/np.mean(B)
mu0err = mu0*np.sqrt(np.sum(A_err*A_err)/np.mean(A)/np.mean(A)+np.sum(B_err*B_err)/np.mean(B)/np.mean(B))/len(A)
if ((disp_type != 'D_2b') & (disp_type != 'D_2') & (disp_type != 'D_4_2')): sys.exit("disp_type must be either 'D_2' pr 'D_4_2' for calc_disp_delay")
for itau in range(0,n_timepts):
tau = -1*maxtime+itau*timestep
muerr = minmu*mu0err
mindisp2,minmu2 = -99,0.
for imu in range(0,n_mupts):
mu = minmu*mu0+imu*mustep*mu0
if simplemuerr:
if (disp_type == 'D_2b'):
A_err_t = A_err/mu
B_err_t = B_err
else:
A_err_t = A_err
B_err_t = B_err*mu
else:
for ibet in range(0,len(B_err)): B_err_t[ibet] = np.sqrt(muerr**2/mu/mu+(B_err[ibet])**2/B[ibet]/B[ibet])*B[ibet]*mu
if disp_type == 'D_2b':
if not simplemuerr: sys.exit('D_2b option only works with simplemuerr')
D_tmp = D_2b(A,B,A_t,B_t,A_err_t,B_err,mu,tau)
if disp_type == 'D_2': D_tmp = D_2(A,B,A_t,B_t,A_err,B_err_t,mu,tau)
if disp_type == 'D_4_2': D_tmp = D_4_2(A,B,A_t,B_t,A_err,B_err_t,mu,tau,delta)
if ((D_tmp < mindisp) | (mindisp == -99)): mindisp,mintau,minmu_out = D_tmp,tau,mu
if ((D_tmp < mindisp2) | (mindisp2 == -99)): mindisp2,minmu2 = D_tmp,mu
if dispmatrix: dispmatrixout[int((tau+maxtime)/timestep)][int((mu-minmu*mu0)/mustep/mu0)] = D_tmp
muerr += mustep*mu0err
if disparray:
itau = int((tau+maxtime)/timestep)
disparrout[itau],disparrmu[itau],disparrtime[itau] = mindisp2,minmu2,tau
if tau < (-1.0*maxtimestep + 0.5*timestep):
t_mu_1 = time.time()
print "Initial ETA: %i seconds"%(int((maxtimestep-basetime)/timestep/(t_mu_1-start_t)-(t_mu_1-start_t)))
if ((tau >= (-1.0*maxtimestep + 0.25*(maxtimestep-basetime))) & (tau < (-1.0*maxtimestep + 0.25*(maxtimestep-basetime) + timestep))):
t_25 = time.time()
print "25%% Done - ETA: %i seconds"%(int(3*(t_25-start_t)))
if ((tau >= (-1.0*maxtimestep + 0.50*(maxtimestep-basetime))) & (tau < (-1.0*maxtimestep + 0.50*(maxtimestep-basetime) + timestep))):
t_50 = time.time()
print "50%% Done - ETA: %i seconds"%(int((t_50-start_t)))
if ((tau >= (-1.0*maxtimestep + 0.75*(maxtimestep-basetime))) & (tau < (-1.0*maxtimestep + 0.75*(maxtimestep-basetime) + timestep))):
t_75 = time.time()
print "75%% Done - ETA: %i seconds"%(int((t_75-start_t)/3.0))
if dispmatrix:
return dispmatrixout
elif disparray:
return disparrout,disparrmu,disparrtime
elif output == 1:
return mintau,minmu_out
else:
return mintau,minmu_out,mindisp
|
[
"takkyon13@gmail.com"
] |
takkyon13@gmail.com
|
ff984b9b73b212e1cece1c76f68d4190532a0066
|
15bc63298145b7864a35a38c86253b8dbcba1c5d
|
/osakeseuranta/src/ui/user_view.py
|
de8efd8bd85779652f20fd02830fc5001988fae9
|
[] |
no_license
|
jarisokka/ot-harjoitustyo
|
8c34f9994a70ef18d49e35a7d6c2ac38f0af2f56
|
33281fc7466a15b226b3a4e113faef2ea339a540
|
refs/heads/master
| 2023-04-30T00:30:47.291865
| 2021-05-11T08:10:43
| 2021-05-11T08:10:43
| 348,694,626
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,399
|
py
|
from tkinter import *
from tkinter import Tk, ttk, constants, messagebox, StringVar
from services.stock_services import stock_services, InvalidValuesError, StockExistsError, GeneralError
class UserView:
    """Tkinter view for managing a logged-in user's stock holdings.

    Shows the user's stocks in a Treeview and provides controls to add,
    update and remove holdings via `stock_services`.  UI strings are Finnish.
    """

    def __init__(self, root, user):
        """Build the view for `user` inside the Tk root window `root`."""
        self._root = root
        self._user = user
        self._root.geometry('950x500')
        self._frame = None
        self.clicked = StringVar()  # selection of the stock dropdown
        self._ticker = None  # currently chosen stock ticker symbol
        self._name = None  # currently chosen stock display name
        # fetch the user's holdings and enrich them with current market data
        self._data = stock_services.initialize_data(self._user)
        self._data = stock_services.get_stock_data(self._data)
        self._list = stock_services.read_list('search_list.csv')  # name -> ticker map
        self._initialize()

    def pack(self):
        """Show the view."""
        self._frame.pack(fill=constants.X)

    def destroy(self):
        """Tear down the view and drop references to user data."""
        self._user = None
        self._data = None
        self._frame.destroy()

    def update_data(self):
        """Re-fetch holdings and market data from the service layer."""
        self._data = stock_services.initialize_data(self._user)
        self._data = stock_services.get_stock_data(self._data)

    def update_treeview(self):
        """Clear the Treeview and repopulate it from self._data."""
        tree = self.stock_tree.get_children()
        for item in tree:
            self.stock_tree.delete(item)
        for data in self._data:
            # per-ticker row: [name, buy price, buy date, current price,
            # absolute change, percent change] -- order inferred from indices
            name = self._data[data][0]
            date = self._data[data][2]
            price = self._data[data][1]
            now = self._data[data][3]
            money = self._data[data][4]
            procent = self._data[data][5]
            self.stock_tree.insert(parent='', index='end', iid=data, text='', values=(name, date, price, now, money, procent))

    def _clear_values(self):
        """Reset the entry fields and the ticker/name labels."""
        self.price_entry.delete(0, END)
        self.date_entry.delete(0, END)
        clear = ' '  # blank label text overwrites the previous value
        self._initialize_ticker_value(clear)
        self._initialize_name_value(clear)
        self._ticker = None
        self._name = None

    def _show_stock(self):
        """Fill the ticker/name labels from the dropdown selection."""
        self._clear_values()
        self._name = self.clicked.get()
        self._ticker = self._list[self._name]
        self._initialize_ticker_value(self._ticker)
        self._initialize_name_value(self._name)

    def _submit_handler(self):
        """Validate the form and create a new holding; report via messagebox."""
        user = self._user
        ticker = self._ticker
        name = self._name
        price = self.price_entry.get()
        date = self.date_entry.get()
        try:
            stock_services.check_values(ticker, name, price, date)
        except InvalidValuesError as error:
            ticker = None
            name = None
            price = None
            date = None
            return messagebox.showinfo(
                'ok', f'{error}',
                parent=self._root)
        try:
            stock_services.create_new(user, ticker, name, price, date)
            self._clear_values()
            self.update_data()
            self.update_treeview()
            return messagebox.showinfo(
                'ok', f'Osake {name} lisättiin listaan',
                parent=self._root)
        except StockExistsError:
            return messagebox.showerror(
                'error', f'Osake {name} on jo listalla',
                parent=self._root)

    def _remove_stock(self):
        """Remove the holding currently selected in the Treeview."""
        try:
            selected = self.stock_tree.focus()
            values = self.stock_tree.item(selected, 'values')
            user = self._user
            ticker = self._list[values[0]]  # values[0] is the display name
            stock_services.remove_stock(user, ticker)
            self._clear_values()
            self.update_data()
            self.update_treeview()
            return messagebox.showinfo(
                'ok', 'Valittu osake poistettiin',
                parent=self._root)
        except GeneralError:
            return messagebox.showerror(
                'error', f'Osakkeen {values[0]} poisto ei onnistunut',
                parent=self._root)

    def _update_handler(self):
        """Update the selected holding's price/date from the entry fields."""
        try:
            selected = self.stock_tree.focus()
            values = self.stock_tree.item(selected, 'values')
            user = self._user
            ticker = self._list[values[0]]
            price = self.price_entry.get()
            date = self.date_entry.get()
            stock_services.update_values(user, ticker, price, date)
            self._clear_values()
            self.update_data()
            self.update_treeview()
            return messagebox.showinfo(
                'ok', f'Osakkeen {values[0]} tiedot päivitettiin',
                parent=self._root)
        except GeneralError:
            return messagebox.showinfo(
                'ok', f'Osakkeen {values[0]} päivitys ei onnistunut',
                parent=self._root)

    def _select_stock_handler(self, event):
        """Treeview click handler: mirror the clicked row into the form."""
        self._clear_values()
        selected = self.stock_tree.focus()
        values = self.stock_tree.item(selected, 'values')
        self._ticker = self._list[values[0]]
        self._name = values[0]
        self._initialize_ticker_value(self._ticker)
        self._initialize_name_value(self._name)
        self.price_entry.insert(0, values[2])  # buy price column
        self.date_entry.insert(0, values[1])  # buy date column

    def _initialize_stock_list_field(self):
        """Create and populate the scrollable holdings Treeview."""
        stock_view = ttk.Treeview(master=self._frame)
        stock_view.grid(row=0, columnspan=6, padx=10, pady=10)
        view_scroll = Scrollbar(stock_view)
        view_scroll.pack(side=RIGHT, fill=Y)
        self.stock_tree = ttk.Treeview(stock_view, yscrollcommand=view_scroll.set, selectmode='extended')
        self.stock_tree.pack()
        view_scroll.config(command=self.stock_tree.yview)
        self.stock_tree['columns'] = ('#1', '#2', '#3', '#4', '#5', '#6')
        # Format columns
        self.stock_tree.column('#0', width=0, stretch=NO)
        self.stock_tree.column('#1', width=200, anchor=W)
        self.stock_tree.column('#2', width=100, anchor=W)
        self.stock_tree.column('#3', width=100, anchor=CENTER)
        self.stock_tree.column('#4', width=100, anchor=CENTER)
        self.stock_tree.column('#5', width=100, anchor=CENTER)
        self.stock_tree.column('#6', width=100, anchor=CENTER)
        # Create Headings
        self.stock_tree.heading('#0', text='', anchor=W)
        self.stock_tree.heading('#1', text='Yrityksen nimi', anchor=W)
        self.stock_tree.heading('#2', text='ostopäivä', anchor=W)
        self.stock_tree.heading('#3', text='ostohinta', anchor=CENTER)
        self.stock_tree.heading('#4', text='kurssi', anchor=CENTER)
        self.stock_tree.heading('#5', text='€ kehitys', anchor=CENTER)
        self.stock_tree.heading('#6', text='% kehitys', anchor=CENTER)
        for data in self._data:
            name = self._data[data][0]
            date = self._data[data][2]
            price = self._data[data][1]
            now = self._data[data][3]
            money = self._data[data][4]
            procent = self._data[data][5]
            self.stock_tree.insert(parent='', index='end', iid=data, text='', values=(name, date, price, now, money, procent))
        self.stock_tree.bind('<ButtonRelease-1>', self._select_stock_handler)

    def _initialize_search_field(self):
        """Dropdown for choosing a stock from the searchable list."""
        search_label = ttk.Label(master=self._slave, text='Valitse osake')
        search_label.grid(row=3, column=0, padx=10, pady=10)
        drop = ttk.OptionMenu(self._slave, self.clicked, *self._list)
        drop.grid(row=3, column=1, sticky=(constants.E, constants.W), padx=10, pady=5)

    def _initialize_ticker_field(self):
        """Static label for the ticker column of the form."""
        ticker_label = ttk.Label(master=self._slave, text='Osakkeen tunnus')
        ticker_label.grid(row=4, column=0, padx=5, pady=5)

    def _initialize_ticker_value(self, ticker):
        """Show `ticker` next to the ticker label."""
        tvalue_label = ttk.Label(master=self._slave, text=ticker)
        tvalue_label.grid(row=4, column=1, padx=5, pady=5)

    def _initialize_name_field(self):
        """Static label for the stock-name column of the form."""
        name_label = ttk.Label(master=self._slave, text='Osakkeen nimi')
        name_label.grid(row=4, column=2, padx=5, pady=5)

    def _initialize_name_value(self, name):
        """Show `name` next to the name label."""
        nvalue_label = ttk.Label(master=self._slave, text=name)
        nvalue_label.grid(row=4, column=3, padx=5, pady=5)

    def _initialize_price_field(self):
        """Label plus entry for the purchase price."""
        price_label = ttk.Label(master=self._slave, text='Hankintahinta')
        price_label.grid(row=5, column=0, padx=5, pady=5)
        self.price_entry = ttk.Entry(master=self._slave, width=30)
        self.price_entry.grid(row=5, column=1, sticky=(constants.E, constants.W), padx=10, pady=5)

    def _initialize_date_field(self):
        """Label plus entry for the purchase date."""
        date_label = ttk.Label(master=self._slave, text='Hankinta ajankohta')
        date_label.grid(row=5, column=2, padx=5, pady=5)
        self.date_entry = ttk.Entry(master=self._slave, width=30)
        self.date_entry.grid(row=5, column=3, sticky=(constants.E, constants.W), padx=10, pady=5)

    def _initialize(self):
        """Assemble all widgets and wire the buttons to their handlers."""
        self._frame = ttk.Frame(master=self._root)
        self._slave = ttk.LabelFrame(master=self._frame, text='Omien osakkeiden hallinta')
        self._slave.grid(row=1, columnspan=6, padx=10, pady=10)
        self._initialize_stock_list_field()
        self._initialize_search_field()
        self._initialize_ticker_field()
        self._initialize_name_field()
        self._initialize_price_field()
        self._initialize_date_field()
        show_stock_button = ttk.Button(
            master=self._slave,
            text='Lisää osakkeen tiedot',
            command=self._show_stock
        )
        save_data_button = ttk.Button(
            master=self._slave,
            text='Tallenna',
            command=self._submit_handler
        )
        update_data_button = ttk.Button(
            master=self._slave,
            text='Päivitä',
            command=self._update_handler
        )
        remove_data_button = ttk.Button(
            master=self._slave,
            text='Poista valittu',
            command=self._remove_stock
        )
        clear_stock_button = ttk.Button(
            master=self._slave,
            text='Tyhjennä syötteet',
            command=self._clear_values
        )
        show_stock_button.grid(row=3, column=2, padx=10, pady=5, sticky=constants.EW)
        save_data_button.grid(row=6, column=1, padx=10, pady=5, sticky=constants.EW)
        update_data_button.grid(row=6, column=3, padx=10, pady=5, sticky=constants.EW)
        remove_data_button.grid(row=7, column=1, padx=10, pady=5, sticky=constants.EW)
        clear_stock_button.grid(row=7, column=3, padx=10, pady=5, sticky=constants.EW)
|
[
"sokkajar@gmail.com"
] |
sokkajar@gmail.com
|
14c7f9577956db004b7db590687e30e8fdba3192
|
ad0e853db635edc578d58891b90f8e45a72a724f
|
/rllib/examples/inference_and_serving/policy_inference_after_training.py
|
17f033847ec1c046e9d6f405d8517c6f099104ee
|
[
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] |
permissive
|
ericl/ray
|
8c93fc713af3b753215d4fe6221278700936e2db
|
e9a1c6d814fb1a81033809f56695030d651388f5
|
refs/heads/master
| 2023-08-31T11:53:23.584855
| 2023-06-07T21:04:28
| 2023-06-07T21:04:28
| 91,077,004
| 2
| 4
|
Apache-2.0
| 2023-01-11T17:19:10
| 2017-05-12T09:51:04
|
Python
|
UTF-8
|
Python
| false
| false
| 3,804
|
py
|
"""
Example showing how you can use your trained policy for inference
(computing actions) in an environment.
Includes options for LSTM-based models (--use-lstm), attention-net models
(--use-attention), and plain (non-recurrent) models.
"""
import argparse
import gymnasium as gym
import os
import ray
from ray import air, tune
from ray.rllib.algorithms.algorithm import Algorithm
from ray.tune.registry import get_trainable_cls
parser = argparse.ArgumentParser()
parser.add_argument(
"--run", type=str, default="PPO", help="The RLlib-registered algorithm to use."
)
parser.add_argument("--num-cpus", type=int, default=0)
parser.add_argument(
"--framework",
choices=["tf", "tf2", "torch"],
default="torch",
help="The DL framework specifier.",
)
parser.add_argument("--eager-tracing", action="store_true")
parser.add_argument(
"--stop-iters",
type=int,
default=200,
help="Number of iterations to train before we do inference.",
)
parser.add_argument(
"--stop-timesteps",
type=int,
default=100000,
help="Number of timesteps to train before we do inference.",
)
parser.add_argument(
"--stop-reward",
type=float,
default=150.0,
help="Reward at which we stop training before we do inference.",
)
parser.add_argument(
"--explore-during-inference",
action="store_true",
help="Whether the trained policy should use exploration during action "
"inference.",
)
parser.add_argument(
"--num-episodes-during-inference",
type=int,
default=10,
help="Number of episodes to do inference over after training.",
)
if __name__ == "__main__":
args = parser.parse_args()
ray.init(num_cpus=args.num_cpus or None)
config = (
get_trainable_cls(args.run)
.get_default_config()
.environment("FrozenLake-v1")
# Run with tracing enabled for tf2?
.framework(args.framework, eager_tracing=args.eager_tracing)
# Use GPUs iff `RLLIB_NUM_GPUS` env var set to > 0.
.resources(num_gpus=int(os.environ.get("RLLIB_NUM_GPUS", "0")))
)
stop = {
"training_iteration": args.stop_iters,
"timesteps_total": args.stop_timesteps,
"episode_reward_mean": args.stop_reward,
}
print("Training policy until desired reward/timesteps/iterations. ...")
tuner = tune.Tuner(
args.run,
param_space=config.to_dict(),
run_config=air.RunConfig(
stop=stop,
verbose=2,
checkpoint_config=air.CheckpointConfig(
checkpoint_frequency=1, checkpoint_at_end=True
),
),
)
results = tuner.fit()
print("Training completed. Restoring new Trainer for action inference.")
# Get the last checkpoint from the above training run.
checkpoint = results.get_best_result().checkpoint
# Create new Algorithm and restore its state from the last checkpoint.
algo = Algorithm.from_checkpoint(checkpoint)
# Create the env to do inference in.
env = gym.make("FrozenLake-v1")
obs, info = env.reset()
num_episodes = 0
episode_reward = 0.0
while num_episodes < args.num_episodes_during_inference:
# Compute an action (`a`).
a = algo.compute_single_action(
observation=obs,
explore=args.explore_during_inference,
policy_id="default_policy", # <- default value
)
# Send the computed action `a` to the env.
obs, reward, done, truncated, _ = env.step(a)
episode_reward += reward
# Is the episode `done`? -> Reset.
if done:
print(f"Episode done: Total reward = {episode_reward}")
obs, info = env.reset()
num_episodes += 1
episode_reward = 0.0
algo.stop()
ray.shutdown()
|
[
"noreply@github.com"
] |
noreply@github.com
|
7d2b33db4c8496d881166db313cdfc29ef465a34
|
a2bc17600cd0f637b2188ae0feeac58fe68ff82d
|
/tests/unit/test_lock.py
|
b9f953c717b37679f842b478931f5b511ddad87b
|
[
"Apache-2.0"
] |
permissive
|
stealthycoin/lynk
|
0f8f7649e98b65d0fa0888ffd53f99460740a453
|
49e8aebdfe39d468722740d889632f7775b4e5fd
|
refs/heads/master
| 2020-04-09T22:02:00.237079
| 2019-01-18T00:08:32
| 2019-01-18T00:08:32
| 160,618,417
| 3
| 0
|
Apache-2.0
| 2019-01-18T00:08:33
| 2018-12-06T04:25:34
|
Python
|
UTF-8
|
Python
| false
| false
| 3,731
|
py
|
import json
import pytest
import mock
from lynk.lock import Lock
from lynk.techniques import BaseTechnique
from lynk.refresh import LockRefresherFactory
from lynk.refresh import LockRefresher
from lynk.exceptions import LockNotGrantedError
@pytest.fixture
def create_lock():
    """Factory fixture returning (lock, technique, refresher_factory) triples.

    The inner factory accepts an optional lock name, an optional technique
    mock, and a `refresher` flag that controls whether a mocked
    LockRefresherFactory is attached (None otherwise).
    """
    def factory(name=None, technique=None, refresher=False):
        lock_name = 'lock name' if name is None else name
        tech = mock.Mock(spec=BaseTechnique) if technique is None else technique
        refresh_factory = mock.Mock(spec=LockRefresherFactory) if refresher else None
        built = Lock(lock_name, tech, refresh_factory)
        return built, tech, refresh_factory
    return factory
class TestLock(object):
    """Unit tests for lynk.lock.Lock using mocked techniques and refreshers."""

    def test_can_serialize_lock(self, create_lock):
        """serialize() embeds the name, a version tag, and the technique's serialization."""
        lock, tech, _ = create_lock(name='foo')
        tech.serialize.return_value = 'SERIALIZED_TECHNIQUE'
        serial = json.loads(lock.serialize())
        assert serial == {
            '__version': 'Lock.1',
            'name': 'foo',
            'technique': 'SERIALIZED_TECHNIQUE',
        }

    def test_can_acquire_lock(self, create_lock):
        """acquire() forwards default lease (20) and max_wait (300) to the technique."""
        lock, tech, _ = create_lock()
        lock.acquire()
        tech.acquire.assert_called_with('lock name', 20, max_wait_seconds=300)

    def test_can_acquire_lock_with_custom_params(self, create_lock):
        """Caller-supplied lease/max_wait values are passed through unchanged."""
        lock, tech, _ = create_lock()
        lock.acquire(100, max_wait_seconds=10)
        tech.acquire.assert_called_with('lock name', 100, max_wait_seconds=10)

    def test_can_release_lock(self, create_lock):
        """release() delegates to the technique with the lock's name."""
        lock, tech, _ = create_lock()
        lock.release()
        tech.release.assert_called_with('lock name')

    def test_can_refresh_lock(self, create_lock):
        """refresh() delegates to the technique with the lock's name."""
        lock, tech, _ = create_lock()
        lock.refresh()
        tech.refresh.assert_called_with('lock name')

    def test_context_manager_does_acquire_and_release(self, create_lock):
        """Using the lock as a context manager acquires on entry, releases on exit."""
        lock, tech, _ = create_lock()
        with lock():
            pass
        tech.acquire.assert_called_with('lock name', 20, max_wait_seconds=300)
        tech.release.assert_called_with('lock name')

    def test_lock_not_granted_does_escape_context_manager(self, create_lock):
        # The context manager swallows errors, its important that the
        # LockNotGrantedError escapes this otherwise it could be silenced and
        # the with block would exceute and operate on a resource protected by
        # the lock, even though the lock acquisition failed.
        # Also the release should not be called, since the acquire failed.
        lock, tech, _ = create_lock()
        tech.acquire.side_effect = LockNotGrantedError()
        with pytest.raises(LockNotGrantedError):
            with lock():
                pass
        tech.acquire.assert_called_with('lock name', 20, max_wait_seconds=300)
        tech.release.assert_not_called()

    def test_acquire_does_create_and_start_refresher(self, create_lock):
        """acquire() builds a refresher (with a 15s interval) and starts it."""
        lock, tech, refresher_factory = create_lock(refresher=True)
        mock_refresher = mock.Mock(spec=LockRefresher)
        refresher_factory.create_lock_refresher.return_value = mock_refresher
        lock.acquire()
        refresher_factory.create_lock_refresher.assert_called_with(
            lock,
            15,
        )
        mock_refresher.start.assert_called_once()

    def test_release_does_stop_refresher(self, create_lock):
        """release() stops the refresher that acquire() started."""
        lock, tech, refresher_factory = create_lock(refresher=True)
        mock_refresher = mock.Mock(spec=LockRefresher)
        refresher_factory.create_lock_refresher.return_value = mock_refresher
        lock.acquire()
        lock.release()
        mock_refresher.stop.assert_called_once()
|
[
"stealthycoin@users.noreply.github.com"
] |
stealthycoin@users.noreply.github.com
|
ceb3d6340e2218d1766db65a40ce31258490f3f8
|
7e5dedaafe433dc45feeb428885c37ac0ebe9f9b
|
/cnn.py
|
d29cc45e7557046b6f370cd6d10973fd08dcb6fe
|
[] |
no_license
|
BalramKokkula/dogcatclassification
|
035672891918652f414a0bfc9a98d71a82f9fe10
|
a48dde37883ea971626a19753b14d6e2d6462775
|
refs/heads/master
| 2022-12-04T01:21:42.622774
| 2020-08-25T13:06:43
| 2020-08-25T13:06:43
| 289,666,302
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,515
|
py
|
from keras.models import Sequential
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense

# ---- Part 1: build the CNN (binary dog/cat classifier) --------------------
# Initialising the CNN
classifier = Sequential()
# Step 1 - Convolution
classifier.add(Conv2D(32, (3, 3), input_shape = (64, 64, 3), activation = 'relu'))
# Step 2 - Pooling
classifier.add(MaxPooling2D(pool_size = (2, 2)))
# Step 3 - Flattening
classifier.add(Flatten())
# Step 4 - Full connection
classifier.add(Dense(units = 128, activation = 'relu'))
classifier.add(Dense(units = 1, activation = 'sigmoid'))
# Compiling the CNN
classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])

# ---- Part 2: fit the CNN to the images ------------------------------------
from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale = 1./255,
                                   shear_range = 0.2,
                                   zoom_range = 0.2,
                                   horizontal_flip = True)
test_datagen = ImageDataGenerator(rescale = 1./255)
training_set = train_datagen.flow_from_directory('/Users/BalramKokkula/PycharmProjects/dog_cat_classifier',
                                                 target_size = (64, 64),
                                                 batch_size = 32,
                                                 class_mode = 'binary')
test_set = test_datagen.flow_from_directory('/Users/BalramKokkula/PycharmProjects/dog_cat_classifier',
                                            target_size = (64, 64),
                                            batch_size = 32,
                                            class_mode = 'binary')
# NOTE: fit_generator returns a History object (training metrics), NOT a model.
history = classifier.fit_generator(training_set,
                                   steps_per_epoch = 8000,
                                   epochs = 1,
                                   validation_data = test_set,
                                   validation_steps = 2000)
classifier.save("model.h5")
print("Saved model to disk")

# ---- Part 3: make a new prediction ----------------------------------------
import numpy as np
from keras.preprocessing import image
# NOTE(review): this path points at a project directory, not an image file --
# load_img needs a concrete image path; confirm before running.
test_image = image.load_img('/Users/BalramKokkula/PycharmProjects/dog_cat_classifier', target_size = (64, 64))
test_image = image.img_to_array(test_image)
test_image = np.expand_dims(test_image, axis = 0)
# BUG FIX: predict on the trained model, not on the History object returned
# by fit_generator (which has no predict method).
result = classifier.predict(test_image)
training_set.class_indices
# BUG FIX: the sigmoid output is a probability in (0, 1) and is almost never
# exactly 1, so threshold at 0.5 instead of testing equality.
if result[0][0] > 0.5:
    prediction = 'dog'
else:
    prediction = 'cat'
print(prediction)
|
[
"ballu4u1@gmail.com"
] |
ballu4u1@gmail.com
|
d9fab95d5078fae0fff3c58c49ac65828bc8dba4
|
e2085f5618b62e56eeadbc1bad3e9b8b2594df90
|
/Python/pythonCERNCourse/day1.py
|
dcdb2d1956a8ec3c91317eff4d767023d59a182b
|
[] |
no_license
|
oviazlo/SelfSTUDY
|
cc03fe70cf8e9922d5c7d85849b8ce21a1e85679
|
96a4d1f68a3ad28f5f2356896cddb84653009fb8
|
refs/heads/master
| 2022-01-31T11:04:25.002475
| 2022-01-01T15:11:59
| 2022-01-01T15:11:59
| 132,145,772
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 386
|
py
|
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
def my_enumerate(sequence):
    """Pair each item of *sequence* with its index, like builtin enumerate."""
    indices = range(len(sequence))
    return zip(indices, sequence)
print("standard implementation")
for i, x in enumerate('hello'):
print(f'{x} was in position {i}')
print("custom implementation")
for i, x in my_enumerate('hello'):
print(f'{x} was in position {i}')
|
[
"oleksandr.viazlo@cern.ch"
] |
oleksandr.viazlo@cern.ch
|
9d1c376c39f97015fb6195dde15ff62496f9487e
|
228e3a79fd9b1e5d737897df382114c264cf625b
|
/Agent3.py
|
0c698d22cfaa985679f4293fdc26dff390cf09a0
|
[] |
no_license
|
duniahakim/Qwirkle
|
f11e3d47877bcfc8a5e0bd58cce7cfcd0dc10447
|
f58e9694532ccc0095b01350bb02c3a4ffa3b6ec
|
refs/heads/main
| 2023-01-10T05:29:34.845326
| 2020-11-09T23:58:30
| 2020-11-09T23:58:30
| 311,495,542
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,565
|
py
|
import copy
from Player import Player
from Qwirkle import GameBoard
from termcolor import colored
class Agent3(Player):
    """Qwirkle agent that greedily searches up to three tile placements ahead
    and plays the sequence with the highest summed score."""

    def print_tiles(self, tiles):
        """Print the agent's hand, one colored shape per tile."""
        tiles_output = ''
        for tile in tiles:
            tiles_output += colored(tile.shape, tile.color) + ' '
        print('\n Your Tiles: %s' % tiles_output)

    def specialPrint(self, reorderedPlays):
        """Debug helper: print each (score, plays) entry on its own line."""
        for seriesPlay in reorderedPlays:
            print(seriesPlay)
            print('')

    def reorderPlays(self, optionalPlays):
        """Collapse candidate play sequences into (total_score, [(tile, x, y), ...]).

        Each input play is a (tile, x, y, score) 4-tuple; scores are summed
        per sequence and dropped from the individual moves.
        """
        result = []
        for optionalPlay in optionalPlays:
            score = 0
            currOptionalPlay = []
            for play in optionalPlay:
                score += play[3]
                currOptionalPlay.append((play[0], play[1], play[2]))
            result.append((score, currOptionalPlay))
        return result

    def play_turn(self, board):
        """Enumerate 1-, 2- and 3-tile play sequences on board copies, then
        commit the highest-scoring sequence to the real board.

        Returns the chosen sequence's total score, or None when no valid
        play exists (the agent passes).
        """
        self.print_tiles(self._tiles)
        validPlays = board.valid_plays()
        optionalPlays = []
        # Depth 1: try every tile in hand at every valid position.
        for x, y in validPlays:
            tiles = self._tiles.copy()
            for tile in tiles:
                optionalPlay = []
                # Simulate on a copy so the real board is untouched.
                # NOTE(review): GameBoard's API spells "prevoius" -- typo
                # lives in GameBoard, not here.
                boardCopy = GameBoard(board = board.get_board(), previous_board = board.get_prevoius_board(), plays = board.get_plays(), last_plays = board.get_last_plays())
                if (boardCopy.play(tile, x = x, y = y)):
                    potentialScore = boardCopy.score()
                    optionalPlay.append((tile, x, y, potentialScore))
                    optionalPlays.append(optionalPlay.copy())
                    # Depth 2: remaining tiles on the updated copy.
                    tiles2 = tiles.copy()
                    tiles2.pop(tiles2.index(tile))
                    for x2, y2 in boardCopy.valid_plays():
                        for tile2 in tiles2:
                            optionalPlay2 = optionalPlay.copy()
                            boardCopy2 = GameBoard(board = boardCopy.get_board(), previous_board = boardCopy.get_prevoius_board(), plays = boardCopy.get_plays(), last_plays = boardCopy.get_last_plays())
                            if (boardCopy2.play(tile2, x = x2, y = y2)):
                                potentialScore2 = boardCopy2.score()
                                optionalPlay2.append((tile2, x2, y2, potentialScore2))
                                optionalPlays.append(optionalPlay2)
                                # Depth 3: a third tile on a third copy.
                                tiles3 = tiles2.copy()
                                tiles3.pop(tiles3.index(tile2))
                                for x3, y3 in boardCopy2.valid_plays():
                                    for tile3 in tiles3:
                                        optionalPlay3 = optionalPlay2.copy()
                                        boardCopy3 = GameBoard(board = boardCopy2.get_board(), previous_board = boardCopy2.get_prevoius_board(), plays = boardCopy2.get_plays(), last_plays = boardCopy2.get_last_plays())
                                        if (boardCopy3.play(tile3, x = x3, y = y3)):
                                            potentialScore3 = boardCopy3.score()
                                            optionalPlay3.append((tile3, x3, y3, potentialScore3))
                                            optionalPlays.append(optionalPlay3)
        if (len(optionalPlays) == 0):
            return
        reorderedPlays = self.reorderPlays(optionalPlays)
        # Pick the sequence with the highest total score and play it for real.
        result = max(reorderedPlays, key = lambda x: x[0])
        bestPlays = result[1]
        for (tile, x, y) in bestPlays:
            board.play(tile, x = x, y = y)
            self._tiles.pop(self._tiles.index(tile))
        return result[0]
|
[
"noreply@github.com"
] |
noreply@github.com
|
1ea7ec9cd6f0f33042d9eac704a7f47a193c0f13
|
8bcf973008b1d7549f59501a1667909848ea87dd
|
/Day0617/staff_info/bin/start.py
|
ff176549a916a65c76e64836aa50c52a7c6e5635
|
[] |
no_license
|
simplesmall/Python-FullStack
|
74ffeb2119eecb7fcb21a136d01aaaf2bcc2c24c
|
210844ef6443a5543d49a20dbec2db9a9b960230
|
refs/heads/master
| 2022-12-17T00:56:40.515335
| 2019-11-15T02:07:57
| 2019-11-15T02:07:57
| 221,816,447
| 0
| 1
| null | 2022-12-13T19:22:26
| 2019-11-15T01:10:55
|
Python
|
UTF-8
|
Python
| false
| false
| 327
|
py
|
import sys
import os
# print(sys.path)
# Resolve the project root: go up two directory levels from this file (bin/)
# to reach the staff_info package directory.
project_path = os.path.dirname(os.path.dirname(__file__))
sys.path.append(project_path)  # add staff_info to sys.path so `core` becomes importable
print(project_path)
from core import main  # must stay below the sys.path fix-up above
if __name__ == '__main__':
    main.home()
|
[
"simplesmall@outlook.com"
] |
simplesmall@outlook.com
|
8b7d5844b805f7521b51862be13a3d5c04a7be2c
|
5c200da6dcc7ef6ad2cf87ca2c89b3a8a5480acf
|
/clase_simulacion.py
|
318cb2df11765cadbf017857ed82b8384a7ad277
|
[] |
no_license
|
DavidVillalobosG/TareaProgramadaTermo
|
d1a67bb7b125adcaedbd33f46f668dfa41b66890
|
0f33d24a8773fda2e34d98cdc1e82a8db005318f
|
refs/heads/master
| 2022-11-23T06:17:27.269729
| 2020-07-15T03:44:42
| 2020-07-15T03:44:42
| 279,751,840
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,950
|
py
|
#Fecha de creación: 19/6/2020 9:30pm
#Última actualización: 13/7/2020
#Versión 3.8.3
#Importación de librerías
import tkinter as tk
import numpy as np
import matplotlib.pyplot as plt
from funciones import calcularNuevaMatriz
#Definición de la clase Simulacion
class Simulacion:
    """Cellular simulation of a cream drop diffusing in coffee.

    Renders the grid on a tkinter canvas, advances it each frame via
    funciones.calcularNuevaMatriz, and live-plots the Shannon entropy of the
    cream distribution with matplotlib.
    NOTE(review): indentation was reconstructed from a whitespace-stripped
    source; verify loop nesting (especially in entropia) against the original.
    """
    TAM_MATRIZ_X = 100   # grid width in cells
    TAM_MATRIZ_Y = 100   # grid height in cells
    TAM_GOTA_X = 10      # cream-drop width in cells
    TAM_GOTA_Y = 10      # cream-drop height in cells
    TAM_PIXELS = 8       # screen pixels per cell
    VALOR_CAFE = 0       # cell value meaning "coffee"
    VALOR_CREMA = 1      # cell value meaning "cream"
    COLOR_CAFE = "brown4"
    COLOR_CREMA = "linen"
    ESPERA = 0           # delay between animation frames (ms)
    iteraciones = 0      # frame counter displayed on the canvas
    """Definición de métodos"""
    def __init__(self):
        """Build the grid, create the tkinter window and start the main loop."""
        self.inicializarMatriz()
        """Creación de la ventana de simulación"""
        self.ventana = tk.Tk()
        self.canvas = tk.Canvas(width=self.TAM_MATRIZ_X*self.TAM_PIXELS,height=self.TAM_MATRIZ_Y*self.TAM_PIXELS,bg=self.COLOR_CAFE)
        self.canvas.pack()
        self.dibujarMatriz()
        """Iniciar main loop"""
        self.ventana.after(self.ESPERA,self.animarMatriz)
        self.ventana.mainloop()  # blocks until the window is closed
    def inicializarMatriz(self):
        """Initialise the grid: all coffee with a centred square drop of cream."""
        self.matriz = np.ones((self.TAM_MATRIZ_X,self.TAM_MATRIZ_Y))*self.VALOR_CAFE
        """Gota de crema en el café"""
        for x in range(self.TAM_MATRIZ_X//2-self.TAM_GOTA_X//2,self.TAM_MATRIZ_X//2+self.TAM_GOTA_X//2):
            for y in range(self.TAM_MATRIZ_Y//2-self.TAM_GOTA_Y//2,self.TAM_MATRIZ_Y//2+self.TAM_GOTA_Y//2):
                self.matriz[x,y]=self.VALOR_CREMA
    def dibujarMatriz(self):
        """Redraw the canvas: clear it, paint every cream cell, show the counter."""
        self.canvas.delete("all")
        """Crema"""
        # NOTE(review): ranges stop at TAM-1, so the last row/column is never drawn.
        for x in range(0,self.TAM_MATRIZ_X - 1):
            for y in range(0,self.TAM_MATRIZ_Y - 1):
                if self.matriz[x,y] == self.VALOR_CREMA:
                    x_inicio = x*self.TAM_PIXELS
                    y_inicio = y*self.TAM_PIXELS
                    self.canvas.create_rectangle(x_inicio,y_inicio,x_inicio+self.TAM_PIXELS-1,y_inicio+self.TAM_PIXELS-1,outline=self.COLOR_CREMA,fill=self.COLOR_CREMA)
        tk.Label(self.canvas,text=self.iteraciones).place(x=20,y=20)
    def animarMatriz(self):
        """Advance one simulation step, then reschedule itself."""
        self.actualizarMatriz()
        self.ventana.after(self.ESPERA,self.animarMatriz)
    def actualizarMatriz(self):
        """Diffuse the cream one step, redraw, recompute entropy and plot it."""
        calcularNuevaMatriz(self.matriz)
        self.iteraciones=self.iteraciones+1
        self.dibujarMatriz()
        self.entropia()
        self.contarentropia()
        if self.iteraciones < 20001:  # stop plotting after 20000 frames
            plt.scatter(x=self.iteraciones, y=self.ENTROPIA_ITERACION)
            plt.pause(0.0000001)
    def entropia(self):
        """Count cream cells per 10-cell sector and store each fraction (/100)
        in LISTA_ENTROPIA, the probability list used by contarentropia()."""
        self.LISTA_ENTROPIA = []
        self.CONTADOR_ENTROPIA = 0
        self.ENTROPIA_SECTOR = 0
        for k in range(0,10):
            for i in range(10*k,(10+10*k)):
                for m in range(0,10):
                    for j in range(10*m,(10+10*m)):
                        if self.matriz[i][j] == self.VALOR_CREMA:
                            self.CONTADOR_ENTROPIA = self.CONTADOR_ENTROPIA + 1
                    self.PROB_SECTOR = self.CONTADOR_ENTROPIA/100
                    self.LISTA_ENTROPIA.append(self.PROB_SECTOR)
                    self.CONTADOR_ENTROPIA = 0
                    self.ENTROPIA_SECTOR = 0
    def contarentropia(self):
        """Compute ENTROPIA_ITERACION = -sum(p*ln(p)) over the non-zero
        probabilities collected by entropia()."""
        while 0 in self.LISTA_ENTROPIA: self.LISTA_ENTROPIA.remove(0)  # ln(0) is undefined
        self.ENTROPIA_ITERACION = 0
        for i in range(len(self.LISTA_ENTROPIA)):
            self.ENTROPIA_ITERACION = self.ENTROPIA_ITERACION + (self.LISTA_ENTROPIA[i]*np.log(self.LISTA_ENTROPIA[i]))
        self.ENTROPIA_ITERACION = -1*self.ENTROPIA_ITERACION
|
[
"noreply@github.com"
] |
noreply@github.com
|
4bd195e3df647270c6eea05e2cc3f7ac68928f9e
|
4852ff5fc90646473a62b2a57b54e0f4b1ad7818
|
/eval.py
|
79be7765a51046ec5a8eac4a01036afae6c03ee3
|
[] |
no_license
|
gunterya/idss_pw3
|
f4765ac93733c1810af53d1c88e78d8a60c250b6
|
12844bd55edb8c67c9a0ae7121db1df17716da5c
|
refs/heads/master
| 2020-03-15T23:52:55.659581
| 2018-06-07T09:09:49
| 2018-06-07T09:09:49
| 132,402,637
| 2
| 3
| null | 2018-05-26T10:16:18
| 2018-05-07T03:32:52
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 3,342
|
py
|
import itertools
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
class_names = ['HF presence', 'HF absence']
def eval(model, X, y, output_path):
    """Evaluate a two-output classifier on (X, y) and save diagnostic plots.

    Writes 'roc_curve.png' and 'confusion_matrix.png' under *output_path* and
    prints derived metrics via eval_indices.
    NOTE(review): the name shadows the built-in ``eval``; kept unchanged for
    existing callers.  Assumes *y* and *prediction* are (n, 2) one-hot/softmax
    arrays — TODO confirm against callers.
    """
    prediction = model.predict(X)
    # plot ROC curve
    plt.figure()
    plot_roc(y, prediction)
    plt.savefig(output_path + 'roc_curve.png')
    plt.show()
    # Harden the soft scores into one-hot labels by argmax over the two columns.
    for i in range(0, len(prediction)):
        if prediction[i][0] > prediction[i][1]: # absence
            prediction[i][0] = 1
            prediction[i][1] = 0
        else: # presence
            prediction[i][0] = 0
            prediction[i][1] = 1
    # Confusion matrix
    cnf_matrix = confusion_matrix(y[:,1], prediction[:,1], labels=[1, 0]) # target = HF presence (value=1)
    # print(cnf_matrix)
    np.set_printoptions(precision=2)
    # indices
    eval_indices(cnf_matrix)
    # plot non-normalized confusion matrix
    plt.figure()
    plot_confusion_matrix(cnf_matrix, classes=class_names, title='Confusion matrix')
    plt.savefig(output_path + 'confusion_matrix.png')
    plt.show()
def eval_indices(cnf_matrix):
    """Print classification metrics derived from a 2x2 confusion matrix.

    Reports sensitivity/specificity, false positive/negative rates, recall,
    precision and F1.  Rows are true labels, columns are predicted labels
    (row/column order as produced by the caller's ``labels=[1, 0]``).
    Returns None; output goes to stdout only.
    """
    true_pos = cnf_matrix[1][1]
    sensitivity = true_pos / cnf_matrix[1].sum(axis=0)
    specificity = cnf_matrix[0][0] / cnf_matrix[0].sum(axis=0)
    print("Sensitivity: %.2f%%\nSpecificity: %.2f%%" % (sensitivity * 100, specificity * 100))
    print("False positive rate: %.2f%%\nFalse negative rate: %.2f%%" % ((1 - specificity) * 100, (1 - sensitivity) * 100))
    recall = sensitivity
    precision = true_pos / (true_pos + cnf_matrix[0][1])
    f1 = 2 * ((recall * precision) / (recall + precision))
    print("Recall: %.2f%%\nPrecision: %.2f%%\nF1: %.2f%%" % (recall * 100, precision * 100, f1 * 100))
def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):
    """Render confusion matrix *cm* as a matplotlib image.

    Each cell is annotated with its value and its share of the grand total.
    *normalize* only switches the number format ('.2f' vs 'd'); it does NOT
    normalise *cm* itself.  Draws into the current figure; returns None.
    """
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    # print(np.sum(cm))
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.  # flip text colour to white on dark cells
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        txt = format(cm[i, j], fmt) +' ('+ format(cm[i, j] / np.sum(cm) * 100, '.2f') + '%)'
        plt.text(j, i, txt,
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Estimated label')
def plot_roc(y, prediction):
    """Plot per-class ROC curves for a two-class problem into the current figure.

    *y* and *prediction* are (n, 2) arrays of true one-hot labels and scores.
    NOTE(review): roc_auc is computed but never displayed (the legend shows
    only the class name).
    """
    from sklearn.metrics import roc_curve, auc
    # Compute ROC curve and ROC area for each class
    n_classes=2
    fpr = dict()
    tpr = dict()
    roc_auc = dict()
    for i in range(n_classes):
        fpr[i], tpr[i], _ = roc_curve(y[:,i], prediction[:,i])
        roc_auc[i] = auc(fpr[i], tpr[i])
    # Plot of a ROC curve for a specific class
    for i in range(n_classes):
        #plt.figure()
        plt.plot(fpr[i], tpr[i], label='%s' % class_names[i])
    plt.plot([0, 1], [0, 1], 'k--')  # chance diagonal
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Receiver operating characteristic')
    plt.legend(loc="lower right")
    # plt.show()
|
[
"krotslya2@gmail.com"
] |
krotslya2@gmail.com
|
e3ae23e183adf64fde585cc7af4664706cfcceab
|
eed9b3d099facd98b8a139681808997d60b4e19c
|
/decorator_opt_arg/decorators.py
|
4442035bdc787580a9d4d98b7258dade8ef37179
|
[] |
no_license
|
pybites/blog_code
|
1240a3393a3672681d97c369711be6c7415d8c10
|
902ebb87e5f7a407714d0e399833f0331a1b915d
|
refs/heads/master
| 2022-12-10T19:50:57.718119
| 2020-08-08T17:13:15
| 2020-08-08T17:13:15
| 76,716,190
| 49
| 47
| null | 2022-11-22T01:54:20
| 2016-12-17T09:51:12
|
HTML
|
UTF-8
|
Python
| false
| false
| 514
|
py
|
from functools import wraps
import time
def sleep(seconds=None):
    """Decorator factory that pauses before each call to the wrapped function.

    Usage: ``@sleep(2)`` sleeps 2 seconds before every call; ``@sleep()`` falls
    back to a 1-second pause.

    Fix: the bare ``@sleep`` form (no parentheses) used to fail because the
    decorated function itself was passed as *seconds*; that case is now
    detected and decorated directly with the default pause.
    """
    if callable(seconds):
        # Bare @sleep usage: `seconds` is actually the function to decorate.
        return sleep(None)(seconds)

    def real_decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            print('Sleeping for {} seconds'.format(seconds))
            # Falsy seconds (None or 0) default to a 1-second pause.
            time.sleep(seconds if seconds else 1)
            return func(*args, **kwargs)
        return wrapper
    return real_decorator
if __name__ == '__main__':
    # Demo: pause one second before each of three calls.
    @sleep(1)  # NOTE: bare @sleep (no parentheses) passes the function itself as `seconds`
    def hello():
        print('hello world')
    for _ in range(3):
        hello()
|
[
"pybites@projects.bobbelderbos.com"
] |
pybites@projects.bobbelderbos.com
|
2ed9f83a79a48cfd95f2eb595c5dee45aee3f6df
|
1793cc93dda9abcfb511a3b6035bfdb47aee0ed9
|
/cli/__init__.py
|
55a92b05fdc75c21ba2172df49aa0c1fa970f977
|
[] |
no_license
|
joaodlf/flask-boilerplate
|
3cbb0faab9168ab05cbd64684c6e208a29ce85de
|
4fc6770958806b613f8c23480fc9cd80f9e55b53
|
refs/heads/master
| 2021-04-28T15:18:54.839064
| 2019-05-11T19:53:23
| 2019-05-11T19:53:23
| 121,984,513
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,528
|
py
|
import atexit
import logging
import os
import logzero
import pendulum
import sentry_sdk
from logzero import logger
from sentry_sdk import capture_message
from config import SENTRY_DSN
from models.cli import Cli as CliModel
class Cli:
    """The class every CLI job should inherit from.
    It creates/updates a cron entry in the database; manages the log file; ensures there aren't duplicate processes running."""
    def __init__(self, name):
        """Register this process under *name*: set up Sentry/logging, guard
        against a still-running previous instance, and record the start time.

        Exits with status 1 if a process with the previously recorded PID is
        still alive.  Registers _finish() to run at interpreter exit.
        """
        self.pid = os.getpid()
        self.name = name
        self.cron_db_entry = None
        self.logs_dir = "cli/logs"
        if SENTRY_DSN:
            sentry_sdk.init(SENTRY_DSN)
        # Set the logger.
        logzero.loglevel(logging.INFO)
        logzero.logfile(
            f"{self.logs_dir}/{self.name}.log", maxBytes=1000000, backupCount=3
        )
        # Execute finish() at the end of CLI execution.
        atexit.register(self._finish)
        try:
            # Check if the cron entry exists.
            cron_db_entry = CliModel.select().where(CliModel.name == self.name).get()
            try:
                # This doesn't actually kill the process, just sends a signal of 0 to test it.
                os.kill(cron_db_entry.pid, 0)
            except ProcessLookupError:
                # Process does not exist, good to go.
                pass
            else:
                # Process still exists, stop execution!
                # NOTE(review): the message is duplicated below instead of reusing
                # `error`, and is only logged locally when Sentry is disabled.
                error = f"Process #{cron_db_entry.pid} ({cron_db_entry.name}) is still running!"
                if SENTRY_DSN:
                    capture_message(
                        f"Process #{cron_db_entry.pid} ({cron_db_entry.name}) is still running!",
                        level="error",
                    )
                else:
                    logger.error(error)
                exit(1)
        except CliModel.DoesNotExist:
            # First time running.
            logger.info(f"Adding new cron {self.name}")
            cron_db_entry = CliModel.create(name=self.name)
        # Record the fresh start: new PID, start timestamp, no finish time yet.
        cron_db_entry.pid = self.pid
        cron_db_entry.dt_start = pendulum.now()
        cron_db_entry.dt_finish = None
        self.cron_db_entry = cron_db_entry
        self.cron_db_entry.save()
        logger.info("--- STARTING ---")
        logger.info(f"--- Logging to {self.logs_dir}/{self.name}.log ---")
    def _finish(self):
        """Called at the end of execution."""
        # Persist the finish timestamp so the next run knows this one completed.
        self.cron_db_entry.dt_finish = pendulum.now()
        self.cron_db_entry.save()
        logger.info("--- FINISHING ---")
|
[
"jdlferreira90@gmail.com"
] |
jdlferreira90@gmail.com
|
5a92103a1953fe3eabb579d1a1438ffb4eecdb7d
|
a02c9c9d7142069df7c98671aad6061d605a7c41
|
/DFS/knock29.py
|
96311e01cbb12c361cd1fd7300eaf395cf920a7e
|
[] |
no_license
|
aktsan/Atcoder_100knocks
|
223f5f967bc5e02d2af62ca8495612b0a35de1fb
|
c017f091b43df8f930c979ed85ea65b0bba42102
|
refs/heads/main
| 2023-01-30T20:21:03.147590
| 2020-12-10T12:37:57
| 2020-12-10T12:37:57
| 311,620,463
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,669
|
py
|
from collections import deque
R,C = map(int,input().split())
sy,sx = map(int,input().split())
gy,gx = map(int,input().split())
# Pad with a dummy first row/element so that grid indices start at 1.
stage = [['nemui']]+[[['a']]+list(input()) for _ in range(R)]
# Represents time (BFS depth) at the start cell.
time = 0
# Cells already visited.
visited = []
# Records the time (distance in steps) for each cell.
vector = [[0] * (C+1) for _ in range(R+1)]
# The four search directions as (row, col) deltas.
dir = [[0,1],[0,-1],[1,0],[-1,0]]
d = deque()
d.append([sy,sx])
vector[sy][sx] = time
cnt = 0
# NOTE(review): indentation below was reconstructed from a whitespace-stripped
# source; verify the nesting against the original file before relying on it.
while d:
    # Take the next cell to expand.
    v = d.popleft()
    #print('v:',v)
    if v not in visited:
        visited.append(v)
    # Record the time.
    #if vector[v[1]][v[0]] != 0:
    #    vector[v[1]][v[0]] = time
    #    time += 1
    #print('visited:',visited)
    for i in dir:
        #print(i)
        if v[1]+i[1] > R or v[0]+i[0] > C:
            pass
        else:
            if stage[v[0]+i[0]][v[1]+i[1]] == '.':
                # Do not enqueue cells that are already visited.
                if [v[0]+i[0],v[1]+i[1]] not in visited:
                    if v[0]+i[0] == gy and v[1]+i[1] == gx:
                        #print('Yes')
                        print(vector[v[0]][v[1]] + 1)
                        exit()
                    #print('nemui',v[0]+i[0],v[1]+i[1])
                    d.append([v[0]+i[0],v[1]+i[1]])
    for i in d:
        #print('i',i)
        if i not in visited:
            #print(i)
            vector[i[0]][i[1]] = vector[v[0]][v[1]] + 1
            visited.append(i)
    #print('after added d:',d)
    #print(vector)
    #print('############')
    cnt += 1
    #if v[1] == gy and v[0] == gx:
    #    print('Yes')
    #    exit()
    #if cnt == 19:
    #    exit()
|
[
"aktbox6@yahoo.co.jp"
] |
aktbox6@yahoo.co.jp
|
ac870263f8705e0386c2f031c4c9df30f3c6981c
|
23e7fa782f9169b45d3d9c4fb3c8b06f0804ff1d
|
/aula8/MongoDB/MongoFunctions.py
|
796c89fd21afec8d2a35137a8790745473196297
|
[] |
no_license
|
rgleme/python_fundamentals
|
0a41847ba3b282d96355037650f2873312d77b3b
|
2dd7d4a376b8c79e6528aba7514e12d69a7a100d
|
refs/heads/master
| 2020-03-23T02:17:14.662863
| 2018-08-09T01:54:37
| 2018-08-09T01:54:37
| 140,966,610
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 573
|
py
|
#!/usr/bin/python
from pymongo import MongoClient
from datetime import datetime
def registrar_logs(login,ip):
    """Insert an SSH-admin access record (admin login, server IP, timestamp)
    into the local MongoDB database "admssh", collection "logs".

    Best-effort: errors are printed to stdout instead of raised.
    (Python 2 source — print statements.)
    """
    try:
        client = MongoClient("127.0.0.1")
        db = client["admssh"]
        db.logs.insert({"administrador":login,"servidor":ip,"data":datetime.now()})
    except Exception as e:
        print "Erro: %s"%e
def listar_ultimos_acessos():
    """Print the first five access-log entries (admin - server - date).

    NOTE(review): the first `db.logs.find({}).limit(5)` builds a cursor whose
    result is discarded; only the loop below actually reads data.
    (Python 2 source — print statements.)
    """
    try:
        client = MongoClient("127.0.0.1")
        db = client["admssh"]
        db.logs.find({}).limit(5)
        for l in db.logs.find({}).limit(5):
            print l["administrador"]," - ",l["servidor"]," - ",l["data"]
    except Exception as e:
        print "Erro: %s"%e
|
[
"rodolfo43@gmail.com"
] |
rodolfo43@gmail.com
|
2f7ebcbf553e4d015196269d689ebb59930f2dd0
|
0cf3d67f4ed1a388fdcbeb150693279f75bb2ea2
|
/src/MuyPicky/settings/local.py
|
6df0fe5925f6090383ba3b66727b4eaf8577d7a9
|
[
"MIT"
] |
permissive
|
thomasrjones211/django2
|
8da991a4820b5846f454f5f3689ad1c41687551c
|
135db5052836236e3f94063828d7f029e7702819
|
refs/heads/master
| 2021-01-19T14:18:42.876804
| 2017-08-22T06:34:12
| 2017-08-22T06:34:12
| 100,896,778
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,119
|
py
|
"""
Django settings for MuyPicky project.
Generated by 'django-admin startproject' using Django 1.11.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'pdi)jl@u0^$ae)$hq$5^$$(onj^=svu@eybe9cj#o$trp#2-&g'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'MuyPicky.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'MuyPicky.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
|
[
"thomasrjones211@gmail.com"
] |
thomasrjones211@gmail.com
|
6c8073ba6c13a556daa80ca42d57430f7be963db
|
6daffb0191a719ab62fef050759f0e99e44d6203
|
/Superbowllish_CNN.py
|
a57a21f5dcda6f97eaf7c78c7a84592915fc9081
|
[] |
no_license
|
Staytunedl/Superbowllish-CNN-
|
cdbe997efc6f3b4a2790af977d0daa373db2ab62
|
625679a8b61b585c3572693c72b9797021fa53a6
|
refs/heads/master
| 2020-07-09T18:49:57.332441
| 2019-08-23T19:01:08
| 2019-08-23T19:01:08
| 204,053,959
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,975
|
py
|
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras import backend as K
from keras.preprocessing.image import img_to_array, load_img
import numpy as np
import tensorflow as tf
import os, cv2, re, random
from sklearn.model_selection import train_test_split
import pandas as pd
from keras import layers, models, optimizers
# Locations of the training and test images; labels are derived later from
# the file names ('dirty' vs 'cleaned').
train_data_dir = 'superbowllsh/train/'
test_data_dir = 'superbowllsh/validation/test/'
train_images = [train_data_dir+i for i in os.listdir(train_data_dir)]
test_images = [test_data_dir+i for i in os.listdir(test_data_dir)]
def atoi(text):
    """Return ``int(text)`` when *text* is all digits, else *text* unchanged."""
    if text.isdigit():
        return int(text)
    return text
def natural_keys(text):
    """Sort key for 'natural' ordering: splits *text* into digit and
    non-digit chunks, converting the digit runs to ints (file2 < file10)."""
    chunks = re.split('(\d+)', text)
    return [int(chunk) if chunk.isdigit() else chunk for chunk in chunks]
# Sort file lists in natural (human) order so e.g. img2 precedes img10.
train_images.sort(key=natural_keys)
test_images.sort(key=natural_keys)
# dimensions of our images.
img_width = 341
img_height = 256
def prepare_data(list_of_images):
    """Load images and derive binary labels from their file paths.

    Returns (x, y): x = BGR images resized to (img_width, img_height) as
    arrays; y = 1 for paths containing 'dirty', 0 for paths containing
    'cleaned'.  NOTE(review): paths matching neither keyword get no label, so
    x and y can end up with different lengths — confirm the input naming.
    """
    x = [] # images as arrays
    y = [] # labels
    for image in list_of_images:
        x.append(cv2.resize(cv2.imread(image), (img_width, img_height), interpolation=cv2.INTER_CUBIC))
    for i in list_of_images:
        if 'dirty' in i:
            y.append(1)
        elif 'cleaned' in i:
            y.append(0)
    return x, y
X, Y = prepare_data(train_images)
print(K.image_data_format())
print(train_images)
print(X)
print(Y)
# 80/20 train/validation split with a fixed seed for reproducibility.
X_train, X_val, Y_train, Y_val = train_test_split(X,Y, test_size=0.2, random_state=1)
nb_train_samples = len(X_train)
nb_validation_samples = len(X_val)
epochs = 50
batch_size = 16
input_shape = (img_height, img_width, 3)
# Small 3-conv-block CNN with a single sigmoid output (binary classification).
# NOTE(review): input_shape is only meaningful on the first layer; passing it
# to the later Conv2D layers is harmless but redundant.
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, (3, 3), input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3), input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])
# this is the augmentation configuration we will use for training
train_datagen = ImageDataGenerator(
    rescale=1. / 255,
    shear_range=0,
    zoom_range=0,
    horizontal_flip=False)
val_datagen = ImageDataGenerator(
    rescale=1. / 255,
    shear_range=0,
    zoom_range=0,
    horizontal_flip=False)
# this is the augmentation configuration we will use for testing:
# only rescaling
test_datagen = ImageDataGenerator(rescale=1. / 255)
train_generator = train_datagen.flow(np.array(X_train), (Y_train), batch_size=batch_size)
validation_generator = val_datagen.flow(np.array(X_val), (Y_val), batch_size=batch_size)
print(np.array(X_train).shape)
X_train = np.array(X_train)
# NOTE(review): `epochs` above is 50 but training actually runs 30 epochs,
# and the hard-coded 16s duplicate `batch_size` — confirm intent.
history = model.fit_generator(
    train_generator,
    steps_per_epoch=nb_train_samples // 16,
    epochs=30,
    validation_data=validation_generator,
    validation_steps=30 // 16)
model.save('first_model.h5')
model.save_weights('first_weights.h5')
# Predict on the test set and write a Kaggle-style submission CSV.
X_test, Y_test = prepare_data(test_images)
test_generator = val_datagen.flow(np.array(X_test), batch_size=22)
prediction_probabilities = model.predict_generator(test_generator, steps = 30 ,verbose=1)
print(len(prediction_probabilities))
counter = range(0, 660)
solution = pd.DataFrame({"id": counter, "label":list(prediction_probabilities)})
cols = ['label']
for col in cols:
    # Each prediction is a 1-element array; strip the brackets to get a float.
    solution[col] = solution[col].map(lambda x: str(x).lstrip('[').rstrip(']')).astype(float)
solution.to_csv("sample_submission.csv", index = False)
|
[
"noreply@github.com"
] |
noreply@github.com
|
1e6f857dbc02d3f5719868bdd2287c9d73f2ae8a
|
6c68f36343b9a177c7c3e7062bd9d8892abc9487
|
/python_tutorial/basic/base3.py
|
b49eef92fac28aaf8d09a4047674b8c278ad4d32
|
[] |
no_license
|
missasan/flask_python_tutorial
|
7373a90bc1b0a75d75ddca1b1d46ac5276f9710c
|
5bd9d541c084671653e53ee10a5432781d718a8c
|
refs/heads/main
| 2023-08-12T18:15:49.875356
| 2021-09-21T01:01:20
| 2021-09-21T01:01:20
| 379,757,392
| 0
| 0
| null | 2021-09-21T01:01:21
| 2021-06-24T00:02:35
|
Python
|
UTF-8
|
Python
| false
| false
| 230
|
py
|
# Boolean type
is_animal = True
if is_animal:
    print('動物です')
is_man = True
is_adult = True
# `or`: true when at least one operand is true
if is_man or is_adult:
    print('男か大人です')
# `and`: true only when both operands are true
if is_man and is_adult:
    print('成人男性です')
|
[
"marukuteiimono@gmail.com"
] |
marukuteiimono@gmail.com
|
feacf8b21b75444ab105e20141abc0c070263ae5
|
b97795e2e4a397fff0f74b2221baa09a963b0864
|
/script/jupyter_setup.py
|
2db62d2550853a4a11315622bad5cda0eef4eb21
|
[
"MIT"
] |
permissive
|
lyltc1/ControlPractice
|
805537bf8d78821fb55977aaf7b01a83a215f38a
|
e88dd94494b178f98496b59125b35ccc5b08ccc7
|
refs/heads/master
| 2021-04-13T06:29:00.672881
| 2020-03-22T10:19:26
| 2020-03-22T10:19:26
| 249,143,573
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,630
|
py
|
import sys
import platform
from IPython import get_ipython
def setup_drake():
    """Install drake (if necessary) and set up the path.
    On Google Colab:
    This will take a minute, but should only need to reinstall once every 12
    hours. Colab will ask you to "Reset all runtimes", say no to save yourself
    the reinstall.
    """
    try:
        import pydrake  # already importable: nothing to do
    except ImportError:
        # Fix: compare strings with `==`, not `is` — identity comparison
        # against a literal is implementation-dependent and unreliable.
        if platform.system() == "Darwin":
            get_ipython().system(
                u"if [ ! -d '/opt/drake' ]; then curl -o drake.tar.gz https://drake-packages.csail.mit.edu/drake/continuous/drake-latest-mac.tar.gz && tar -xzf drake.tar.gz -C /opt && export HOMEBREW_CURL_RETRIES=4 && brew update && brew bundle --file=/opt/drake/share/drake/setup/Brewfile --no-lock; fi"  # noqa
            )
        # NOTE(review): platform.linux_distribution() was removed in Python
        # 3.8, so this branch only works on <= 3.7 — confirm target runtime.
        elif platform.linux_distribution() == ("Ubuntu", "18.04", "bionic"):
            get_ipython().system(
                u"if [ ! -d '/opt/drake' ]; then curl -o drake.tar.gz https://drake-packages.csail.mit.edu/drake/continuous/drake-latest-bionic.tar.gz && tar -xzf drake.tar.gz -C /opt &&apt-get update -o APT::Acquire::Retries=4 -qq && apt-get install -o APT::Acquire::Retries=4 -o Dpkg::Use-Pty=0 -qy --no-install-recommends $(cat /opt/drake/share/drake/setup/packages-bionic.txt); fi"  # noqa
            )
        else:
            assert False, "Unsupported platform"
        # Make the freshly installed drake importable.
        v = sys.version_info
        sys.path.append("/opt/drake/lib/python{}.{}/site-packages".format(
            v.major, v.minor))
def setup_underactuated():
    """Install underactuated (if necessary) and set up the path.
    On Google Colab:
    This will take a minute, but should only need to reinstall once every 12
    hours. Colab will ask you to "Reset all runtimes", say no to save yourself
    the reinstall.
    """
    setup_drake()
    try:
        import underactuated  # already importable: nothing to do
    except ImportError:
        # Fix: compare strings with `==`, not `is` — identity comparison
        # against a literal is implementation-dependent and unreliable.
        if platform.system() == "Darwin":
            get_ipython().system(
                u"if [ ! -d '/opt/underactuated' ]; then git clone https://github.com/lyltc1/underactuated.git /opt/underactuated && /opt/underactuated/scripts/setup/mac/install_prereqs; fi"  # noqa
            )
        # NOTE(review): platform.linux_distribution() was removed in Python
        # 3.8, so this branch only works on <= 3.7 — confirm target runtime.
        elif platform.linux_distribution() == ("Ubuntu", "18.04", "bionic"):
            get_ipython().system(
                u"if [ ! -d '/opt/underactuated' ]; then git clone https://github.com/lyltc1/underactuated.git /opt/underactuated && /opt/underactuated/scripts/setup/ubuntu/18.04/install_prereqs; fi"  # noqa
            )
        else:
            assert False, "Unsupported platform"
        # Make the freshly cloned repository importable.
        sys.path.append("/opt/underactuated")
|
[
"870767645@qq.com"
] |
870767645@qq.com
|
342e7acc88ea1e98b7fe31f2e5223d18837b7c17
|
c343239aa2f687da61266e8d4d640866c8a5edce
|
/2022-python/day06/solution.py
|
6486c623b29203aa077a4aff6381f771e041f9f8
|
[] |
no_license
|
erikiva/advent-of-code
|
e8f1b6fd7942d445834c7c8ed4a6e014d2cb7add
|
54443ed8b2dee7ccfc8c567d5d62c27b6d86be59
|
refs/heads/main
| 2023-01-10T01:45:14.236480
| 2022-12-25T21:24:25
| 2022-12-25T21:24:25
| 226,888,408
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 155
|
py
|
def part_both(data, length):
    """Return the index just past the first run of `length` distinct characters.

    Scans `data` left to right; for the first window whose characters are all
    unique, returns start + length.  Returns 0 when no such window exists
    (windows shorter than `length` near the end can never qualify).
    """
    for start, _ in enumerate(data):
        window = data[start:start + length]
        if len(set(window)) == length:
            return start + length
    return 0
|
[
"natalia.vidal@automattic.com"
] |
natalia.vidal@automattic.com
|
fe0088ebf9df88a75d965d942d219349f3a68477
|
70bfe6d30059ea78f3e12921e1875773782453f6
|
/tencentcloud/vod/v20180717/errorcodes.py
|
2d03545d2145754adfdcd1419abaf7232659bc47
|
[
"Apache-2.0"
] |
permissive
|
sangliangliang/tencentcloud-sdk-python
|
d28e308df871fc6a94d3afb59f3365a6cc865f1c
|
82c7fc4da7f5131688fc01dc90d4465b7b3b41a2
|
refs/heads/master
| 2023-08-03T21:44:05.595225
| 2021-09-13T01:10:52
| 2021-09-13T01:10:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 22,847
|
py
|
# -*- coding: utf8 -*-
# Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Tencent Cloud VOD API error-code constants.
# (Comments below translated from the original Chinese.)
# CAM signature/authentication error.
AUTHFAILURE = 'AuthFailure'
# Operation failed.
FAILEDOPERATION = 'FailedOperation'
# Operation failed: category level limit exceeded.
FAILEDOPERATION_CLASSLEVELLIMITEXCEEDED = 'FailedOperation.ClassLevelLimitExceeded'
# Operation failed: duplicate category name.
FAILEDOPERATION_CLASSNAMEDUPLICATE = 'FailedOperation.ClassNameDuplicate'
# Operation failed: the category does not exist.
FAILEDOPERATION_CLASSNOFOUND = 'FailedOperation.ClassNoFound'
# Operation failed: unsupported cover type.
FAILEDOPERATION_COVERTYPE = 'FailedOperation.CoverType'
# Abnormal user account.
FAILEDOPERATION_INVALIDACCOUNT = 'FailedOperation.InvalidAccount'
# The VOD service is not activated.
FAILEDOPERATION_INVALIDVODUSER = 'FailedOperation.InvalidVodUser'
# The media is blocked by the system.
FAILEDOPERATION_MEDIAFORBIDEDBYSYSTEM = 'FailedOperation.MediaForbidedBySystem'
# Operation failed: unsupported media type.
FAILEDOPERATION_MEDIATYPE = 'FailedOperation.MediaType'
# Network error.
FAILEDOPERATION_NETWORKERROR = 'FailedOperation.NetWorkError'
# No permission to use this API.
FAILEDOPERATION_NOPRIVILEGES = 'FailedOperation.NoPrivileges'
# Operation failed: the parent ID does not exist.
FAILEDOPERATION_PARENTIDNOFOUND = 'FailedOperation.ParentIdNoFound'
# Operation failed: subclass count limit exceeded.
FAILEDOPERATION_SUBCLASSLIMITEXCEEDED = 'FailedOperation.SubclassLimitExceeded'
# Operation failed: duplicate task.
FAILEDOPERATION_TASKDUPLICATE = 'FailedOperation.TaskDuplicate'
# Operation failed: failed to upload file to COS.
FAILEDOPERATION_UPLOADCOSFAIL = 'FailedOperation.UploadCosFail'
# Internal error.
INTERNALERROR = 'InternalError'
# Internal error: DB access failed.
INTERNALERROR_DBERROR = 'InternalError.DBError'
# Internal error: failed to generate template ID.
INTERNALERROR_GENDEFINITION = 'InternalError.GenDefinition'
# Internal error: failed to get media file information.
INTERNALERROR_GETFILEINFOERROR = 'InternalError.GetFileInfoError'
# Internal error: failed to get media list.
INTERNALERROR_GETMEDIALISTERROR = 'InternalError.GetMediaListError'
# Time parsing error.
INTERNALERROR_TIMEPARSEERROR = 'InternalError.TimeParseError'
# Internal error: failed to update media file information.
INTERNALERROR_UPDATEMEDIAERROR = 'InternalError.UpdateMediaError'
# Internal error: failed to upload cover image.
INTERNALERROR_UPLOADCOVERIMAGEERROR = 'InternalError.UploadCoverImageError'
# Internal error: failed to upload watermark image.
INTERNALERROR_UPLOADWATERMARKERROR = 'InternalError.UploadWatermarkError'
# Invalid parameter.
INVALIDPARAMETER = 'InvalidParameter'
# The procedure template name already exists.
INVALIDPARAMETER_EXISTEDPROCEDURENAME = 'InvalidParameter.ExistedProcedureName'
# Invalid parameter value: expiration time.
INVALIDPARAMETER_EXPIRETIME = 'InvalidParameter.ExpireTime'
# The procedure template name does not exist.
INVALIDPARAMETER_PROCEDURENAMENOTEXIST = 'InvalidParameter.ProcedureNameNotExist'
# Invalid parameter value: storage region.
INVALIDPARAMETER_STORAGEREGION = 'InvalidParameter.StorageRegion'
# Invalid parameter value.
INVALIDPARAMETERVALUE = 'InvalidParameterValue'
# Invalid parameter value: AddKeyFrameDescs conflicts with ClearKeyFrameDescs.
INVALIDPARAMETERVALUE_ADDKEYFRAMEDESCSANDCLEARKEYFRAMEDESCSCONFLICT = 'InvalidParameterValue.AddKeyFrameDescsAndClearKeyFrameDescsConflict'
# Invalid parameter value: AddKeyFrameDescs conflicts with DeleteKeyFrameDescs.
INVALIDPARAMETERVALUE_ADDKEYFRAMEDESCSANDDELETEKEYFRAMEDESCSCONFLICT = 'InvalidParameterValue.AddKeyFrameDescsAndDeleteKeyFrameDescsConflict'
# Invalid parameter value: AddTags conflicts with ClearTags.
INVALIDPARAMETERVALUE_ADDTAGSANDCLEARTAGSCONFLICT = 'InvalidParameterValue.AddTagsAndClearTagsConflict'
# Invalid parameter value: AddTags conflicts with DeleteTags.
INVALIDPARAMETERVALUE_ADDTAGSANDDELETETAGSCONFLICT = 'InvalidParameterValue.AddTagsAndDeleteTagsConflict'
# Invalid parameter value: AI analysis Definition.
INVALIDPARAMETERVALUE_AIANALYSISTASKDEFINITION = 'InvalidParameterValue.AiAnalysisTaskDefinition'
# Invalid parameter value: AI content review Definition.
INVALIDPARAMETERVALUE_AICONTENTREVIEWTASKDEFINITION = 'InvalidParameterValue.AiContentReviewTaskDefinition'
# Invalid parameter value: AI recognition Definition.
INVALIDPARAMETERVALUE_AIRECOGNITIONTASKDEFINITION = 'InvalidParameterValue.AiRecognitionTaskDefinition'
# Invalid Area parameter.
INVALIDPARAMETERVALUE_AREA = 'InvalidParameterValue.Area'
# Invalid parameter: audio stream bitrate.
INVALIDPARAMETERVALUE_AUDIOBITRATE = 'InvalidParameterValue.AudioBitrate'
# Invalid parameter value: AudioChannel.
INVALIDPARAMETERVALUE_AUDIOCHANNEL = 'InvalidParameterValue.AudioChannel'
# Invalid parameter: audio stream codec.
INVALIDPARAMETERVALUE_AUDIOCODEC = 'InvalidParameterValue.AudioCodec'
# Invalid parameter: audio stream sample rate.
INVALIDPARAMETERVALUE_AUDIOSAMPLERATE = 'InvalidParameterValue.AudioSampleRate'
# Invalid audio/video bitrate.
INVALIDPARAMETERVALUE_BITRATE = 'InvalidParameterValue.Bitrate'
# Invalid parameter value: BlockConfidence.
INVALIDPARAMETERVALUE_BLOCKCONFIDENCE = 'InvalidParameterValue.BlockConfidence'
# Invalid file type.
INVALIDPARAMETERVALUE_CATEGORIES = 'InvalidParameterValue.Categories'
# Invalid parameter value: category ID.
INVALIDPARAMETERVALUE_CLASSID = 'InvalidParameterValue.ClassId'
# Invalid parameter value: ClassIds.
INVALIDPARAMETERVALUE_CLASSIDS = 'InvalidParameterValue.ClassIds'
# Invalid parameter value: ClassName.
INVALIDPARAMETERVALUE_CLASSNAME = 'InvalidParameterValue.ClassName'
# Invalid intelligent categorization control field.
INVALIDPARAMETERVALUE_CLASSIFCATIONCONFIGURE = 'InvalidParameterValue.ClassifcationConfigure'
# Invalid parameter value: the clip duration is too long.
INVALIDPARAMETERVALUE_CLIPDURATION = 'InvalidParameterValue.ClipDuration'
# Invalid audio/video codec.
INVALIDPARAMETERVALUE_CODEC = 'InvalidParameterValue.Codec'
# Invalid parameter value: ColumnCount.
INVALIDPARAMETERVALUE_COLUMNCOUNT = 'InvalidParameterValue.ColumnCount'
# Invalid parameter: template description (Comment).
INVALIDPARAMETERVALUE_COMMENT = 'InvalidParameterValue.Comment'
# Invalid parameter: container format.
INVALIDPARAMETERVALUE_CONTAINER = 'InvalidParameterValue.Container'
# Invalid parameter value: ContainerType.
INVALIDPARAMETERVALUE_CONTAINERTYPE = 'InvalidParameterValue.ContainerType'
# Invalid parameter value: CoordinateOrigin.
INVALIDPARAMETERVALUE_COORDINATEORIGIN = 'InvalidParameterValue.CoordinateOrigin'
# Invalid intelligent cover control field.
INVALIDPARAMETERVALUE_COVERCONFIGURE = 'InvalidParameterValue.CoverConfigure'
# Invalid parameter value: cover type.
INVALIDPARAMETERVALUE_COVERTYPE = 'InvalidParameterValue.CoverType'
# Invalid parameter value: cover URL.
INVALIDPARAMETERVALUE_COVERURL = 'InvalidParameterValue.CoverUrl'
# Invalid parameter value: CutAndCrops.
INVALIDPARAMETERVALUE_CUTANDCROPS = 'InvalidParameterValue.CutAndCrops'
# Invalid parameter value: time granularity.
INVALIDPARAMETERVALUE_DATAINTERVAL = 'InvalidParameterValue.DataInterval'
# Invalid parameter value: data type.
INVALIDPARAMETERVALUE_DATATYPE = 'InvalidParameterValue.DataType'
# Invalid parameter value: Date.
INVALIDPARAMETERVALUE_DATE = 'InvalidParameterValue.Date'
# Invalid parameter value: illegal default face library filter labels.
INVALIDPARAMETERVALUE_DEFAULTLIBRARYLABELSET = 'InvalidParameterValue.DefaultLibraryLabelSet'
# Invalid parameter: Definition.
INVALIDPARAMETERVALUE_DEFINITION = 'InvalidParameterValue.Definition'
# Invalid parameter: Definitions.
INVALIDPARAMETERVALUE_DEFINITIONS = 'InvalidParameterValue.Definitions'
# Invalid parameter value: deleting the default template is not allowed.
INVALIDPARAMETERVALUE_DELETEDEFAULTTEMPLATE = 'InvalidParameterValue.DeleteDefaultTemplate'
# Invalid parameter value: Description exceeds the length limit.
INVALIDPARAMETERVALUE_DESCRIPTION = 'InvalidParameterValue.Description'
# Invalid value of the switch that forbids low-to-high bitrate transcoding.
INVALIDPARAMETERVALUE_DISABLEHIGHERVIDEOBITRATE = 'InvalidParameterValue.DisableHigherVideoBitrate'
# Invalid value of the switch that forbids low-to-high resolution transcoding.
INVALIDPARAMETERVALUE_DISABLEHIGHERVIDEORESOLUTION = 'InvalidParameterValue.DisableHigherVideoResolution'
# Invalid Districts parameter value.
INVALIDPARAMETERVALUE_DISTRICTS = 'InvalidParameterValue.Districts'
# Invalid parameter: the domain name does not exist.
INVALIDPARAMETERVALUE_DOMAINNAME = 'InvalidParameterValue.DomainName'
# Invalid parameter value: the domain name list is too long.
INVALIDPARAMETERVALUE_DOMAINNAMES = 'InvalidParameterValue.DomainNames'
# Invalid DRM type.
INVALIDPARAMETERVALUE_DRMTYPE = 'InvalidParameterValue.DrmType'
# Invalid parameter value: EndDate.
INVALIDPARAMETERVALUE_ENDDATE = 'InvalidParameterValue.EndDate'
# Invalid parameter value: EndTime.
INVALIDPARAMETERVALUE_ENDTIME = 'InvalidParameterValue.EndTime'
# Invalid parameter: invalid end time offset.
INVALIDPARAMETERVALUE_ENDTIMEOFFSET = 'InvalidParameterValue.EndTimeOffset'
# Invalid parameter value: malformed ExpireTime.
INVALIDPARAMETERVALUE_EXPIRETIME = 'InvalidParameterValue.ExpireTime'
# Invalid parameter value: duplicate face.
INVALIDPARAMETERVALUE_FACEDUPLICATE = 'InvalidParameterValue.FaceDuplicate'
# Invalid parameter value: illegal face library.
INVALIDPARAMETERVALUE_FACELIBRARY = 'InvalidParameterValue.FaceLibrary'
# Invalid parameter value: illegal face score.
INVALIDPARAMETERVALUE_FACESCORE = 'InvalidParameterValue.FaceScore'
# FileId does not exist.
INVALIDPARAMETERVALUE_FILEID = 'InvalidParameterValue.FileId'
# Invalid FileIds parameter.
INVALIDPARAMETERVALUE_FILEIDS = 'InvalidParameterValue.FileIds'
# The FileIds array is empty.
INVALIDPARAMETERVALUE_FILEIDSEMPTY = 'InvalidParameterValue.FileIdsEmpty'
# Invalid parameter value: too many FileIds.
INVALIDPARAMETERVALUE_FILEIDSTOOMANY = 'InvalidParameterValue.FileIdsTooMany'
# Invalid video type.
INVALIDPARAMETERVALUE_FILETYPE = 'InvalidParameterValue.FileType'
# Invalid parameter: fill type.
INVALIDPARAMETERVALUE_FILLTYPE = 'InvalidParameterValue.FillType'
# Invalid parameter: FiltrateAudio must be 0 or 1.
INVALIDPARAMETERVALUE_FILTRATEAUDIO = 'InvalidParameterValue.FiltrateAudio'
# Invalid parameter: FiltrateVideo.
INVALIDPARAMETERVALUE_FILTRATEVIDEO = 'InvalidParameterValue.FiltrateVideo'
# Invalid parameter value: Format.
INVALIDPARAMETERVALUE_FORMAT = 'InvalidParameterValue.Format'
# Invalid parameter value: Width and Height are both empty while Format is webp.
INVALIDPARAMETERVALUE_FORMATWEBPLACKWIDTHANDHEIGHT = 'InvalidParameterValue.FormatWebpLackWidthAndHeight'
# Invalid parameter value: Width and Height cannot both be 0 while Format is webp.
INVALIDPARAMETERVALUE_FORMATWEBPWIDTHANDHEIGHTBOTHZERO = 'InvalidParameterValue.FormatWebpWidthAndHeightBothZero'
# Invalid parameter: video frame rate.
INVALIDPARAMETERVALUE_FPS = 'InvalidParameterValue.Fps'
# Invalid intelligent frame-level tag control field.
INVALIDPARAMETERVALUE_FRAMETAGCONFIGURE = 'InvalidParameterValue.FrameTagConfigure'
# Invalid parameter value: FunctionArg.
INVALIDPARAMETERVALUE_FUNCTIONARG = 'InvalidParameterValue.FunctionArg'
# Invalid parameter value: FunctionName.
INVALIDPARAMETERVALUE_FUNCTIONNAME = 'InvalidParameterValue.FunctionName'
# Invalid parameter: height.
INVALIDPARAMETERVALUE_HEIGHT = 'InvalidParameterValue.Height'
# Invalid intelligent highlight control parameter.
INVALIDPARAMETERVALUE_HIGHLIGHTCONFIGURE = 'InvalidParameterValue.HighlightConfigure'
# Invalid ImageContent parameter value.
INVALIDPARAMETERVALUE_IMAGECONTENT = 'InvalidParameterValue.ImageContent'
# Failed to Base64-decode the image.
INVALIDPARAMETERVALUE_IMAGEDECODEERROR = 'InvalidParameterValue.ImageDecodeError'
# Invalid parameter: image watermark template.
INVALIDPARAMETERVALUE_IMAGETEMPLATE = 'InvalidParameterValue.ImageTemplate'
# Invalid parameter: invalid operation type.
INVALIDPARAMETERVALUE_INVALIDOPERATIONTYPE = 'InvalidParameterValue.InvalidOperationType'
# Invalid Isps parameter.
INVALIDPARAMETERVALUE_ISPS = 'InvalidParameterValue.Isps'
# Invalid parameter value: the key frame description is too long.
INVALIDPARAMETERVALUE_KEYFRAMEDESCCONTENTTOOLONG = 'InvalidParameterValue.KeyFrameDescContentTooLong'
# Invalid parameter value: LabelSet.
INVALIDPARAMETERVALUE_LABELSET = 'InvalidParameterValue.LabelSet'
# Invalid parameter: Limit.
INVALIDPARAMETERVALUE_LIMIT = 'InvalidParameterValue.Limit'
# Invalid parameter value: Limit is too large.
INVALIDPARAMETERVALUE_LIMITTOOLARGE = 'InvalidParameterValue.LimitTooLarge'
# Invalid parameter value: MediaManifestContent.
INVALIDPARAMETERVALUE_MEDIAMANIFESTCONTENT = 'InvalidParameterValue.MediaManifestContent'
# Invalid parameter value: media type.
INVALIDPARAMETERVALUE_MEDIATYPE = 'InvalidParameterValue.MediaType'
# Invalid parameter value: media file URL.
INVALIDPARAMETERVALUE_MEDIAURL = 'InvalidParameterValue.MediaUrl'
# Invalid Metric parameter.
INVALIDPARAMETERVALUE_METRIC = 'InvalidParameterValue.Metric'
# Invalid parameter value: modifying the default template is not allowed.
INVALIDPARAMETERVALUE_MODIFYDEFAULTTEMPLATE = 'InvalidParameterValue.ModifyDefaultTemplate'
# Invalid parameter value: Name exceeds the length limit.
INVALIDPARAMETERVALUE_NAME = 'InvalidParameterValue.Name'
# Invalid filename prefixes.
INVALIDPARAMETERVALUE_NAMEPREFIXES = 'InvalidParameterValue.NamePrefixes'
# Too many elements in the Names array.
INVALIDPARAMETERVALUE_NAMES = 'InvalidParameterValue.Names'
# Invalid parameter value: illegal object library.
INVALIDPARAMETERVALUE_OBJECTLIBRARY = 'InvalidParameterValue.ObjectLibrary'
# Invalid parameter value: Offset.
INVALIDPARAMETERVALUE_OFFSET = 'InvalidParameterValue.Offset'
# Invalid parameter value: Offset is too large.
INVALIDPARAMETERVALUE_OFFSETTOOLARGE = 'InvalidParameterValue.OffsetTooLarge'
# Invalid parameter value: Operation.
INVALIDPARAMETERVALUE_OPERATION = 'InvalidParameterValue.Operation'
# Invalid parameter value: ParentId.
INVALIDPARAMETERVALUE_PARENTID = 'InvalidParameterValue.ParentId'
# Invalid parameter value: malformed face image format.
INVALIDPARAMETERVALUE_PICFORMATERROR = 'InvalidParameterValue.PicFormatError'
# Invalid procedure template name.
INVALIDPARAMETERVALUE_PROCEDURENAME = 'InvalidParameterValue.ProcedureName'
# Invalid parameter value: Quality.
INVALIDPARAMETERVALUE_QUALITY = 'InvalidParameterValue.Quality'
# Invalid parameter value: RemoveAudio.
INVALIDPARAMETERVALUE_REMOVEAUDIO = 'InvalidParameterValue.RemoveAudio'
# Invalid parameter value: RemoveVideo.
INVALIDPARAMETERVALUE_REMOVEVIDEO = 'InvalidParameterValue.RemoveVideo'
# Invalid parameter: RepeatType.
INVALIDPARAMETERVALUE_REPEATTYPE = 'InvalidParameterValue.RepeatType'
# Invalid parameter: resolution.
INVALIDPARAMETERVALUE_RESOLUTION = 'InvalidParameterValue.Resolution'
# Invalid ResolutionAdaptive.
INVALIDPARAMETERVALUE_RESOLUTIONADAPTIVE = 'InvalidParameterValue.ResolutionAdaptive'
# Invalid parameter value: ReviewConfidence.
INVALIDPARAMETERVALUE_REVIEWCONFIDENCE = 'InvalidParameterValue.ReviewConfidence'
# Invalid parameter value: ReviewWallSwitch.
INVALIDPARAMETERVALUE_REVIEWWALLSWITCH = 'InvalidParameterValue.ReviewWallSwitch'
# Invalid parameter value: RowCount.
INVALIDPARAMETERVALUE_ROWCOUNT = 'InvalidParameterValue.RowCount'
# Invalid parameter value: SampleInterval.
INVALIDPARAMETERVALUE_SAMPLEINTERVAL = 'InvalidParameterValue.SampleInterval'
# Invalid audio sample rate.
INVALIDPARAMETERVALUE_SAMPLERATE = 'InvalidParameterValue.SampleRate'
# Invalid parameter value: SampleType.
INVALIDPARAMETERVALUE_SAMPLETYPE = 'InvalidParameterValue.SampleType'
# Invalid parameter value: ScreenshotInterval.
INVALIDPARAMETERVALUE_SCREENSHOTINTERVAL = 'InvalidParameterValue.ScreenshotInterval'
# SessionContext is too long.
INVALIDPARAMETERVALUE_SESSIONCONTEXTTOOLONG = 'InvalidParameterValue.SessionContextTooLong'
# Duplicate deduplication ID; the request was deduplicated.
INVALIDPARAMETERVALUE_SESSIONID = 'InvalidParameterValue.SessionId'
# SessionId is too long.
INVALIDPARAMETERVALUE_SESSIONIDTOOLONG = 'InvalidParameterValue.SessionIdTooLong'
# Invalid parameter value: Sort.
INVALIDPARAMETERVALUE_SORT = 'InvalidParameterValue.Sort'
# Invalid parameter: audio channel system.
INVALIDPARAMETERVALUE_SOUNDSYSTEM = 'InvalidParameterValue.SoundSystem'
# Invalid SourceDefinition; check whether the media file has the corresponding transcode.
INVALIDPARAMETERVALUE_SOURCEDEFINITION = 'InvalidParameterValue.SourceDefinition'
# Invalid parameter value: SourceType.
INVALIDPARAMETERVALUE_SOURCETYPE = 'InvalidParameterValue.SourceType'
# Unknown media file source.
INVALIDPARAMETERVALUE_SOURCETYPES = 'InvalidParameterValue.SourceTypes'
# Invalid parameter value: StartDate.
INVALIDPARAMETERVALUE_STARTDATE = 'InvalidParameterValue.StartDate'
# Invalid parameter value: StartTime.
INVALIDPARAMETERVALUE_STARTTIME = 'InvalidParameterValue.StartTime'
# Invalid parameter: invalid start time offset.
INVALIDPARAMETERVALUE_STARTTIMEOFFSET = 'InvalidParameterValue.StartTimeOffset'
# Invalid parameter value: illegal manual review status value.
INVALIDPARAMETERVALUE_STATUS = 'InvalidParameterValue.Status'
# Invalid parameter value: storage region.
INVALIDPARAMETERVALUE_STORAGEREGION = 'InvalidParameterValue.StorageRegion'
# Invalid parameter value: StorageRegions.
INVALIDPARAMETERVALUE_STORAGEREGIONS = 'InvalidParameterValue.StorageRegions'
# Invalid parameter value: StorageType.
INVALIDPARAMETERVALUE_STORAGETYPE = 'InvalidParameterValue.StorageType'
# Invalid parameter value: StreamId.
INVALIDPARAMETERVALUE_STREAMIDINVALID = 'InvalidParameterValue.StreamIdInvalid'
# Invalid stream ID parameter.
INVALIDPARAMETERVALUE_STREAMIDS = 'InvalidParameterValue.StreamIds'
# Invalid parameter value: subapplication ID.
INVALIDPARAMETERVALUE_SUBAPPID = 'InvalidParameterValue.SubAppId'
# Invalid parameter value: SubtitleFormat.
INVALIDPARAMETERVALUE_SUBTITLEFORMAT = 'InvalidParameterValue.SubtitleFormat'
# Invalid parameter value: SVG is empty.
INVALIDPARAMETERVALUE_SVGTEMPLATE = 'InvalidParameterValue.SvgTemplate'
# Invalid parameter value: SVG height.
INVALIDPARAMETERVALUE_SVGTEMPLATEHEIGHT = 'InvalidParameterValue.SvgTemplateHeight'
# Invalid parameter value: SVG width.
INVALIDPARAMETERVALUE_SVGTEMPLATEWIDTH = 'InvalidParameterValue.SvgTemplateWidth'
# Invalid parameter value: Switch.
INVALIDPARAMETERVALUE_SWITCH = 'InvalidParameterValue.Switch'
# Invalid parameter value: TEHD Type.
INVALIDPARAMETERVALUE_TEHDTYPE = 'InvalidParameterValue.TEHDType'
# Invalid intelligent tag control field.
INVALIDPARAMETERVALUE_TAGCONFIGURE = 'InvalidParameterValue.TagConfigure'
# Invalid parameter value: the tag is too long.
INVALIDPARAMETERVALUE_TAGTOOLONG = 'InvalidParameterValue.TagTooLong'
# Invalid parameter value: Tags.
INVALIDPARAMETERVALUE_TAGS = 'InvalidParameterValue.Tags'
# The task ID does not exist.
INVALIDPARAMETERVALUE_TASKID = 'InvalidParameterValue.TaskId'
# Invalid parameter value: search text.
INVALIDPARAMETERVALUE_TEXT = 'InvalidParameterValue.Text'
# Invalid parameter: text transparency.
INVALIDPARAMETERVALUE_TEXTALPHA = 'InvalidParameterValue.TextAlpha'
# Invalid parameter: text template.
INVALIDPARAMETERVALUE_TEXTTEMPLATE = 'InvalidParameterValue.TextTemplate'
# Invalid parameter value: Thumbnails.
INVALIDPARAMETERVALUE_THUMBNAILS = 'InvalidParameterValue.Thumbnails'
# Invalid parameter value: TimeType.
INVALIDPARAMETERVALUE_TIMETYPE = 'InvalidParameterValue.TimeType'
# Invalid Type parameter value.
INVALIDPARAMETERVALUE_TYPE = 'InvalidParameterValue.Type'
# Invalid Types parameter.
INVALIDPARAMETERVALUE_TYPES = 'InvalidParameterValue.Types'
# Deduplication ID repeated within a day; the request was deduplicated.
INVALIDPARAMETERVALUE_UNIQUEIDENTIFIER = 'InvalidParameterValue.UniqueIdentifier'
# Invalid parameter: invalid URL.
INVALIDPARAMETERVALUE_URL = 'InvalidParameterValue.Url'
# Invalid parameter value: illegal custom face library filter labels.
INVALIDPARAMETERVALUE_USERDEFINELIBRARYLABELSET = 'InvalidParameterValue.UserDefineLibraryLabelSet'
# Invalid parameter: vcrf.
INVALIDPARAMETERVALUE_VCRF = 'InvalidParameterValue.Vcrf'
# Invalid parameter: video stream bitrate.
INVALIDPARAMETERVALUE_VIDEOBITRATE = 'InvalidParameterValue.VideoBitrate'
# Invalid parameter: video stream codec.
INVALIDPARAMETERVALUE_VIDEOCODEC = 'InvalidParameterValue.VideoCodec'
# Invalid Vids parameter.
INVALIDPARAMETERVALUE_VIDS = 'InvalidParameterValue.Vids'
# Invalid parameter value: VOD session.
INVALIDPARAMETERVALUE_VODSESSIONKEY = 'InvalidParameterValue.VodSessionKey'
# Invalid parameter value: Watermarks.
INVALIDPARAMETERVALUE_WATERMARKS = 'InvalidParameterValue.Watermarks'
# Invalid parameter: width.
INVALIDPARAMETERVALUE_WIDTH = 'InvalidParameterValue.Width'
# Horizontal position of the watermark origin relative to the video image origin; supports % and px formats.
INVALIDPARAMETERVALUE_XPOS = 'InvalidParameterValue.XPos'
# Vertical position of the watermark origin relative to the video image origin; supports % and px formats.
INVALIDPARAMETERVALUE_YPOS = 'InvalidParameterValue.YPos'
# Quota limit exceeded.
LIMITEXCEEDED = 'LimitExceeded'
# Limit exceeded: combined count of existing and new key frame descriptions exceeds the limit.
LIMITEXCEEDED_KEYFRAMEDESCCOUNTREACHMAX = 'LimitExceeded.KeyFrameDescCountReachMax'
# Limit exceeded: combined count of existing and new tags exceeds the limit.
LIMITEXCEEDED_TAGCOUNTREACHMAX = 'LimitExceeded.TagCountReachMax'
# Limit exceeded: too many templates.
LIMITEXCEEDED_TOOMUCHTEMPLATE = 'LimitExceeded.TooMuchTemplate'
# The resource does not exist.
RESOURCENOTFOUND = 'ResourceNotFound'
# Resource not found: the cover does not exist.
RESOURCENOTFOUND_COVERURL = 'ResourceNotFound.CoverUrl'
# Resource not found: the file does not exist.
RESOURCENOTFOUND_FILENOTEXIST = 'ResourceNotFound.FileNotExist'
# Resource not found: person.
RESOURCENOTFOUND_PERSON = 'ResourceNotFound.Person'
# Resource not found: the template does not exist.
RESOURCENOTFOUND_TEMPLATENOTEXIST = 'ResourceNotFound.TemplateNotExist'
# The user does not exist.
RESOURCENOTFOUND_USERNOTEXIST = 'ResourceNotFound.UserNotExist'
# Resource not found: keyword.
RESOURCENOTFOUND_WORD = 'ResourceNotFound.Word'
# Invalid parameter: M3U8 files with a MasterPlaylist are not supported.
RESOURCEUNAVAILABLE_MASTERPLAYLIST = 'ResourceUnavailable.MasterPlaylist'
# Unauthorized operation.
UNAUTHORIZEDOPERATION = 'UnauthorizedOperation'
# Unknown parameter error.
UNKNOWNPARAMETER = 'UnknownParameter'
# Unsupported operation.
UNSUPPORTEDOPERATION = 'UnsupportedOperation'
# Deleting a non-empty category is not supported.
UNSUPPORTEDOPERATION_CLASSNOTEMPTY = 'UnsupportedOperation.ClassNotEmpty'
|
[
"tencentcloudapi@tenent.com"
] |
tencentcloudapi@tenent.com
|
71ba4ee7dbdb38f9f5e41c9b92d886fda6729209
|
91c7de67e656fec2b9c32b64e1b6ae88083a0283
|
/functional_tests/test_simple_list_creation.py
|
f5aee3c61fd7a18d274cbbaf40fa57f4feb504f4
|
[] |
no_license
|
pohily/TDD
|
e0a85c60c5ee2e7388323ffb00b7fe81372431c1
|
60d2a0f9debfcc22be54d85e981aee23f8113563
|
refs/heads/master
| 2022-05-04T20:07:46.296627
| 2019-07-24T11:57:19
| 2019-07-24T11:57:19
| 189,567,223
| 0
| 0
| null | 2022-04-22T21:23:44
| 2019-05-31T09:28:16
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 3,307
|
py
|
from .base import FunctionalTest
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
class NewVisitorTest(FunctionalTest):
    """Functional tests for a new visitor's first interactions with the site."""

    def test_can_start_a_list_for_one_user(self):
        # Edith visits the homepage of the to-do app she has heard about.
        self.browser.get(self.live_server_url)

        # Both the page title and the main header mention to-do lists.
        self.assertIn('To-Do', self.browser.title)
        heading = self.browser.find_element_by_tag_name('h1').text
        self.assertIn('To-Do', heading)

        # An input field invites her to add an item straight away.
        item_input = self.get_item_input_box()
        self.assertEqual(
            item_input.get_attribute('placeholder'),
            'Enter a to-do item'
        )

        # She types her first item (tying fly-fishing lures is her hobby)
        # and submits it with Enter; the page lists it as item 1.
        item_input.send_keys('Buy peacock feathers')
        item_input.send_keys(Keys.ENTER)
        self.wait_for_row_in_list_table('1: Buy peacock feathers')

        # The input box is still there, so she adds a second item.
        self.add_list_item('Use peacock feathers to make a fly')

        # Both items now appear in the list table.
        self.wait_for_row_in_list_table('2: Use peacock feathers to make a fly')
        self.wait_for_row_in_list_table('1: Buy peacock feathers')
        # Satisfied, she goes back to sleep.

    def test_multiple_users_can_start_lists_at_different_urls(self):
        # Edith starts a new list and gets her own unique URL for it.
        self.browser.get(self.live_server_url)
        self.add_list_item('Buy peacock feathers')
        edith_list_url = self.browser.current_url
        self.assertRegex(edith_list_url, '/lists/.+')

        # A brand-new browser session ensures no cookies or session data
        # from Edith leak through to the next visitor.
        self.browser.quit()
        self.browser = webdriver.Firefox()

        # Francis loads the home page: there is no trace of Edith's list.
        self.browser.get(self.live_server_url)
        body_text = self.browser.find_element_by_tag_name('body').text
        self.assertNotIn('Buy peacock feathers', body_text)
        self.assertNotIn('make a fly', body_text)

        # Francis starts his own (less interesting) list.
        self.add_list_item('Buy milk')

        # He receives a distinct URL of his own.
        francis_list_url = self.browser.current_url
        self.assertRegex(francis_list_url, '/lists/.+')
        self.assertNotEqual(francis_list_url, edith_list_url)

        # Edith's items are still absent and Francis's item is present.
        body_text = self.browser.find_element_by_tag_name('body').text
        self.assertNotIn('Buy peacock feathers', body_text)
        self.assertIn('Buy milk', body_text)
        # Satisfied, they both go back to sleep.
|
[
"mpohily@gmail.com"
] |
mpohily@gmail.com
|
713275915abef8843f8041d6f606da3ed88339b9
|
f77593e9e9a112e85acd3c73c056a7466d76e15e
|
/request_delivery_installation/request_delivery_installation/urls.py
|
d15cc80688686b4ea06f1692684c43314ce8d0e5
|
[] |
no_license
|
geethusuresh/reqest_installation
|
bf47c915aee1e1f7730ea858c000a6dd434a79fb
|
d047fa9f303273915651d0cbe03b7795f157f31c
|
refs/heads/master
| 2021-01-25T04:09:10.282831
| 2014-09-28T06:40:10
| 2014-09-28T06:40:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,498
|
py
|
# URLconf for the delivery/installation request app (legacy Django 1.x
# `patterns()` style).
from django.conf.urls import patterns, include, url
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.contrib import admin
# Register every installed app's ModelAdmin classes with the admin site.
admin.autodiscover()
# NOTE(review): wildcard import pulls every view into this namespace; listing
# the view classes explicitly would make this URLconf self-documenting.
from web.views import *
# All authenticated pages are wrapped in login_required; only the login view
# and the AJAX fetch_* endpoints are left unwrapped here.
urlpatterns = patterns('',
url(r'^$', login_required(Home.as_view()), name='home'),
url(r'^accounts/login/$', Login.as_view(), name='login'),
url(r'^logout/$', Logout.as_view(), name='logout'),
url(r'^register/$', login_required(Signup.as_view()), name='register'),
url(r'^dealer/(?P<user_id>[\d+]+)/add/subdealer/$',login_required(AddSubDealer.as_view()), name="add_subdealer"),
url(r'^add_purchase_info/$', login_required(AddPurchanseInfo.as_view()), name='add_purchase_info'),
url(r'^fetch_brand_names/$', FetchBrandNames.as_view(), name='fetch_brand_names'),
url(r'^fetch_purchase_sales_men/$', FetchPurchaseSalesManList.as_view(), name='fetch_purchase_sales_men'),
url(r'^fetch_dealers/$', FetchDealersList.as_view(), name='fetch_dealers'),
url(r'^purchase_info/(?P<purchase_info_id>[\d+]+)/$', login_required(PurchaseInfoView.as_view()), name='purchase_info'),
url(r'^search_purchase_info/(?P<delivery_order_number>[\w-]+)/$', login_required(SearchPurchaseInfo.as_view()), name="search_purchase_info"),
url(r'^fetch_dealer_company_names/$', FetchFirmNames.as_view(), name='fetch_firm_names'),
url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
url(r'^admin/', include(admin.site.urls)),
)
|
[
"geethu@technomicssolutions.com"
] |
geethu@technomicssolutions.com
|
6a02fca6a9a04cae32ecd20d9968794b9f89b69b
|
23e41c63a6f5d0a5d7491c3b2cf9a1d9b1e1653a
|
/active_subnets.py
|
c570d2ae6190e12a4a79278ef81337f5fa2c2fc8
|
[] |
no_license
|
feabell/sanity_parser
|
0568c7ef3ef1759ad5f0e23976da7d208e761bd5
|
38aa5945c2bb7aeda1b86f5add3ebb8f892f3dc0
|
refs/heads/master
| 2020-03-06T20:26:15.208101
| 2018-03-27T22:30:00
| 2018-03-27T22:30:00
| 127,053,008
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,362
|
py
|
import requests
import re
import os
# Sanity web-UI credentials -- intentionally blank in the repo; fill in before
# running.  NOTE(review): consider reading these from environment variables
# rather than hard-coding them here.
user = ''
password = ''
# Base URL of the sanity IP-register UI; the trailing '/' is relied on by the
# `baseurl + pageurl` concatenations below.
baseurl = 'https://sanity.vodafone.co.nz/'
def get_leaf_from_tree(pageurl, leaflist):
    """Recursively walk a sanity subnet tree, collecting ip_tree.php leaf URLs.

    Fetches `pageurl` (relative to `baseurl`), appends every
    "ip_tree.php?subnet=N" leaf link found to `leaflist`, and recurses into
    every "subnet_tree.php?parent=N" subtree link.

    Bug fix: the original joined ALL subtree URLs on a line with "\\n" and
    recursed once on that joined string -- producing an invalid request URL
    whenever a line contained more than one subtree link, and a pointless
    request to `baseurl` itself when it contained none.  Each match now gets
    its own recursive call; dead commented-out code removed.
    """
    r = requests.get(baseurl + pageurl, auth=(user, password))
    if 'ip_tree.php' in r.text or 'subnet_tree.php' in r.text:
        for line in r.text.splitlines():
            if "ip_tree.php" in line:
                # Leaf pages: record each subnet link on this line.
                for ip in re.findall(r'action="/(ip_tree.php\?subnet=[0-9]*)', line):
                    leaflist.append(ip)
            if "subnet_tree.php" in line:
                # Interior nodes: descend into each subtree separately.
                for subtree in re.findall(r'src="(/subnet_tree.php\?parent=[0-9]*)', line):
                    get_leaf_from_tree(subtree, leaflist)
    return
def get_ips(pageurl):
    """Return host IPs listed on a sanity ip_tree page.

    Fetches `pageurl` and, for every line referencing edit_host.php, collects
    the list of dotted-quad addresses found on that line.  The return value is
    a list of per-line match lists (the caller joins each sublist itself);
    an empty list when the page has no tree markup.
    """
    response = requests.get(baseurl + pageurl, auth=(user, password))
    if 'tree.add' not in response.text:
        return []
    host_pattern = re.compile(
        r'WebFXTreeItem\(\'([0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3})')
    return [host_pattern.findall(line)
            for line in response.text.splitlines()
            if "edit_host.php" in line]
# --- Script body: enumerate VRFs, walk each tree, dump active IPs ---
# Fetch the top-level IP register page (one WebFXLoadTreeItem per VRF).
r = requests.get(baseurl + 'user_ip_register.php', auth=(user, password))
if 'tree.add' in r.text:
    # Each match is a (vrf_name, tree_url) pair from the JS tree declaration.
    top_level = re.findall('tree.add\(new WebFXLoadTreeItem\(\'(.*)\', \'(.*)\'', r.text)
    for vrf in top_level:
        # Make a directory for this VRF ('/' is not legal inside a dir name).
        os.mkdir(vrf[0].replace('/', '.'))
        # NOTE(review): this request's result is unused (get_leaf_from_tree
        # re-fetches the same URL below) and it is sent without auth --
        # looks like dead code; confirm and remove.
        r = requests.get(baseurl + vrf[1])
        subnets = []
        # Walk the tree and collect all the ip_tree.php leaf entries.
        get_leaf_from_tree(vrf[1], subnets)
        # Write a targets.txt file in the per-VRF subdirectory.
        f= open(vrf[0].replace('/', '.') + "/targets.txt","w+")
        # Get the active IPs from each leaf page.
        for subnet in subnets:
            ips=get_ips(subnet)
            print("======= " + vrf[0] + " ========")
            print(ips)
            print('\n\n')
            for ip in ips:
                # Each `ip` is the per-line match list returned by get_ips.
                f.write('\n'.join(ip)+'\n')
        f.close()
|
[
"feabell@gmail.com"
] |
feabell@gmail.com
|
d27c15c3aeb48b1eaf584f35d1acd2898bc4befa
|
128c6a5f23460b4352776685855485b90cedb8e7
|
/src/sleuth/lingo/.svn/text-base/typecheck.py.svn-base
|
af54484fb0068339921889c9d74452667e37bb2d
|
[] |
no_license
|
swastikaB/pysleuth
|
e556035b21c9a77049ce45247b32b140724a2026
|
6fea8800c3e4540c75f9a210ff6b0322ed1a178f
|
refs/heads/master
| 2021-01-19T19:39:55.826284
| 2013-03-13T22:39:51
| 2013-03-13T22:39:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 22,894
|
from sleuth.common.exception import TypeException
from sleuth.lingo.components import *
from sleuth.lingo.types import *
import sys
import copy
'''
First renames all variables within functions to prepend with function name, that way
they do not clash.
Then do standard unification as per Lecture 10 Slides CS-290C Fall 2010.
-Rules are defined in lecture notes
-Keep a map of variable names to the first occurrence, thus we can always
unify that variable (as each is a separate variable in memory).
Finally rename back to original values.
If annotate_types is true, will proceed with annotating the AST with types even
when type inference fails, in order to provide feedback to the user.
When Type inference fails, raises a TypeException to signify this.
'''
class TypeCheck:
    def __init__(self,annotate_types):
        """Set up empty unification state.

        annotate_types -- when true, annotate the AST with whatever types
        were inferred even if inference ultimately fails (user feedback).
        """
        # Counter for generating fresh temporary variables.
        self.temp_count = 0
        # Map of variable name -> first Variable occurrence; later occurrences
        # unify against this canonical instance (see module docstring).
        self.variables = {}
        self.functions = []
        self.return_variable = None
        self.annotate_types = annotate_types
'''
Print every variable that was visited and types were not inferred.
Exit if any unknown (annotating AST first if specified).
'''
    def check_unknown(self):
        """Report every user variable whose type could not be inferred.

        Raises TypeException when any non-temporary variable is still
        untyped, annotating the AST first when self.annotate_types is set.
        """
        unknown_variables = ""
        for name, var in self.variables.items():
            varParent = self.find(var)
            if not isinstance(varParent, Type):#didn't infer type
                if var.name[0] != "_":#not a temporary variable
                    unknown_variables += var.name + ", "
        if len(unknown_variables) != 0:
            # The slice strips the trailing ", " from the accumulated list.
            print >> sys.stderr, "Could not infer types for: %s" % unknown_variables[0:len(unknown_variables)-2]
            if self.annotate_types:
                self.rename.visit_program(self.program)
            raise TypeException("Could not infer all types.")
'''
Checking for cyclic references.
i.e. a = ref b, check that b != ref a
'''
    def free_var(self, var, t):
        """Occurs check: ensure `var` does not appear inside term `t`.

        Follows references and function signatures recursively; raises
        TypeException on a cyclic type (e.g. a = ref b while b = ref a).
        """
        if isinstance(t, Reference):
            return self.free_var(var, t.value)
        if isinstance(t, Function):
            return [self.free_var(var, x) for x in t.signature]
        if isinstance(t, Primitive):
            # Primitives contain no variables; nothing to check.
            return
        if var.name == t.name:
            print >> sys.stderr, "cyclic dependency in types involving %s" % var.name
            raise TypeException("Cyclic types error.")
        if t.name in self.variables:
            # `t` is itself a variable: chase its representative and recurse,
            # unless find() returned `t` itself (avoids an infinite loop).
            parent = self.find(self.variables[t.name])
            if parent != t:
                return self.free_var(var, parent)
'''
Attempt to unify x and y.
@x & y either Variable, or some Type
First find parents using standard union/find algorithm.
Possible cases:
1. If one is a type and other is a variable, always set the type to
the parent.
2. If both are references, unify what they refer too, then allow the standard
case to set who is parent. Otherwise, could not possibly resolve types.
3. Same case, except now functions, so unify the signatures. Error can now also
occur if different length signatures.
4. If x_root is a primitive, then yRoot must be the same primitive.
'''
    def unify(self, x, y):
        """Unify two terms (Variables or Types) via union/find.

        Cases: a type always becomes the parent of a variable; two
        references unify their referents; two functions unify their
        signatures element-wise; two primitives must be identical.
        Any mismatch is reported through self.error().  Terms that fall
        through (e.g. variable/variable) are merged by union-by-rank.
        """
        x_root = self.find(x)
        y_root = self.find(y)
        if isinstance(x_root, Type) and isinstance(y_root, Variable):
            # Occurs-check first, then bind the variable to the type.
            self.free_var(y_root, x_root)
            y_root.parent = x_root
            # NOTE(review): max() is applied to the objects themselves, not
            # their .rank fields -- likely intended
            # max(x_root.rank, y_root.rank); confirm before changing.
            y_root.rank = max(x_root, y_root)
            return
        elif isinstance(x_root, Variable) and isinstance(y_root, Type):
            self.free_var(x_root, y_root)
            x_root.parent = y_root
            # NOTE(review): same max()-of-objects question as above.
            x_root.rank = max(x_root, y_root)
            return
        elif isinstance(x_root, Reference) or isinstance(y_root, Reference):
            if isinstance(y_root, Reference) and isinstance(x_root, Reference):
                # ref(a) ~ ref(b)  =>  a ~ b
                self.unify(x_root.value, y_root.value)
            else:
                self.error("Tried to unify %s and %s, found parent types which could not match: %s and %s" \
                    % (x, y, x_root, y_root), x_root, y_root)
        elif isinstance(x_root, Function) or isinstance(y_root, Function):
            if isinstance(y_root, Function) and isinstance(x_root, Function):
                if len(x_root.signature) != len(y_root.signature):
                    self.error("Tried to unify %s and %s, found parent types with non matching signature lengths: %s and %s" \
                        % (x, y, x_root, y_root), x_root, y_root)
                # Unify argument/return positions pairwise.
                for i in range(len(x_root.signature)):
                    self.unify(x_root.signature[i], y_root.signature[i])
            else:
                self.error("Tried to unify %s and %s, found parent types which could not match: %s and %s" \
                    % (x, y, x_root, y_root), x_root, y_root)
        elif isinstance(x_root, Primitive):
            if not isinstance(y_root, Primitive) or x_root!=y_root:
                self.error("Tried to unify %s and %s, found parent types which could not match: %s and %s" \
                    % (x, y, x_root, y_root), x_root, y_root)
        # Standard union-by-rank merge of the two sets.
        if x_root.rank > y_root.rank:
            y_root.parent = x_root
        elif x_root.rank < y_root.rank:
            x_root.parent = y_root
        elif x_root != y_root: # Unless x and y are already in same set, merge them
            y_root.parent = x_root
            x_root.rank = x_root.rank + 1
'''
If variable has not been seen before, add to the mapping,
some variables may never be defined, just used (default
integer), so check the references and functions as well
for these variables.
In the case of the variable, use the mapping to find the
parent for all instances of that variable.
'''
def find(self, x):
    """Union-find `find` with path compression.

    Before walking the parent chain, register every Variable node seen
    inside x (plain, inside a Reference, or in a Function signature) in
    self.variables, and canonicalise a plain Variable to the instance
    already stored under its name, so all occurrences of one source
    variable share a single parent chain.
    """
    if isinstance(x, Variable):
        if not x.name in self.variables:
            self.variables[x.name] = x
        else:
            # Reuse the previously seen instance of this variable.
            x = self.variables[x.name]
    if isinstance(x, Reference):
        if isinstance(x.value, Variable):
            if not x.value.name in self.variables:
                self.variables[x.value.name] = x.value
    if isinstance(x, Function):
        for var in x.signature:
            if isinstance(var, Variable):
                if not var.name in self.variables:
                    self.variables[var.name] = var
    if x.parent == x:
        return x  # x is its own root
    else:
        # Path compression: re-point directly at the root.
        x.parent = self.find(x.parent)
        return x.parent
'''
Visit a Program node in the AST
Visit each function declaration, then each command in the linked list of commands.
'''
def visit_program(self, program):
    """Type-check a Program AST node.

    First Rename pass scope-qualifies variable names, then every
    function declaration and the top-level command list are visited to
    generate and solve unification constraints; the second Rename pass
    (its internal flag has flipped) restores names and annotates types.
    """
    self.program = program
    self.rename = Rename(self.variables)
    self.rename.visit_program(program)  # pass 1: scope-prefix variable names
    for functionDeclaration in program.functions:
        functionDeclaration.accept(self)
    self.visit_command_block(program.command)
    self.check_unknown()
    self.rename.visit_program(program)  # pass 2: strip prefixes + annotate types
def visit_assignment_command(self, assignment_command):
    """Emit unification constraints for each syntactic form of assignment.

    The comment above each branch shows the source pattern and the
    constraint(s) it generates.
    """
    # FunctionReturn gives nothing extra from FunctionCall, skip it
    if isinstance(assignment_command.expression, FunctionReturn):
        return
    assigned_variable = assignment_command.assigned_variable
    expression = assignment_command.expression
    lhs_variable = assigned_variable
    # !LHS = RHS, unify: LHS = ref(temp)
    if isinstance(assigned_variable, DereferencedVariable):
        lhs_variable = self.get_temp(assigned_variable.line_span, assigned_variable.lex_span)
        self.unify(Variable(assigned_variable.name, assigned_variable.line_span,
                            assigned_variable.lex_span),
                   Reference(lhs_variable, lhs_variable.line_number))
    # LHS = integer, unify: LHS = INTEGER
    if isinstance(expression, Number):
        self.unify(lhs_variable, Primitive("INTEGER", lhs_variable.line_number))
    # LHS = boolean, unify: LHS = BOOLEAN
    elif isinstance(expression, Boolean):
        self.unify(lhs_variable, Primitive("BOOLEAN", lhs_variable.line_number))
    # LHS = fun(a1,a2,...,an), unify: fun = t1->t2->...tn->tn+1, LHS = tn+1 or
    # LHS = !fun(a1,a2,...,an), unify: fun = ref(t1->t2->...tn->tn+1), LHS = tn+1
    elif isinstance(expression, FunctionCall):
        # One fresh temporary per argument, plus one for the return type.
        parameterTypes = [self.get_temp(expression.line_span, expression.lex_span)
                          for parameter in expression.parameter_variables]
        returnType = self.get_temp(expression.line_span, expression.lex_span)
        self.unify(returnType, lhs_variable)
        for parameter in range(len(parameterTypes)):
            self.unify(parameterTypes[parameter],
                       expression.parameter_variables[parameter])
        parameterTypes.append(returnType)
        if isinstance(expression.function_variable, DereferencedVariable):
            self.unify(Variable(expression.function_variable.name,
                                expression.function_variable.line_span,
                                expression.function_variable.lex_span),
                       Reference(Function(parameterTypes, expression.line_number),
                                 expression.function_variable.line_number))
        else:
            self.unify(expression.function_variable,
                       Function(parameterTypes, expression.line_number))
    # LHS = BinaryExpression unify: LHS = (infer type of BinaryExpression)
    elif isinstance(expression, BinaryExpression):
        self.unify(lhs_variable, self.evaluate_known(expression, None))
    # LHS = !RHS unify: LHS = temp & RHS = ref(temp)
    elif isinstance(expression, DereferencedVariable):
        temp_var_rhs = self.get_temp(expression.line_span, expression.lex_span)
        self.unify(lhs_variable, temp_var_rhs)
        self.unify(Variable(expression.name, expression.line_span, expression.lex_span),
                   Reference(temp_var_rhs, temp_var_rhs.line_number))
    # LHS = ref RHS unify: LHS = ref(RHS)
    elif isinstance(expression, ReferencedVariable):
        self.unify(lhs_variable,
                   Reference(Variable(expression.name, expression.line_span,
                                      expression.lex_span), expression.line_number))
    # LHS = RHS unify: LHS = RHS
    elif isinstance(expression, Variable):
        self.unify(lhs_variable, expression)
    # LHS = new Type unify: LHS = Ref(Type)
    elif isinstance(expression, New):
        self.unify(lhs_variable, Reference(expression.allocate_type, lhs_variable.line_number))
''' Check expression evaluates to a boolean, and visit both blocks'''
def visit_if_command(self, if_command):
    """Constrain the guard expression to BOOLEAN and check both branches."""
    self.evaluate_known(if_command.expression, Primitive("BOOLEAN", if_command.expression.line_number))
    self.visit_command_block(if_command.true_block)
    self.visit_command_block(if_command.false_block)
'''Check expression evaluates to a boolean, and visit block '''
def visit_while_command(self, while_command):
    """Constrain the loop guard to BOOLEAN and check the loop body."""
    self.evaluate_known(while_command.expression, Primitive("BOOLEAN", while_command.expression.line_number))
    self.visit_command_block(while_command.loop_block)
def visit_skip_command(self, skip_command):
    """`skip` imposes no typing constraints."""
    pass
''' Does not give anything to unify, but add to variables if not already present. '''
def visit_input_command(self, input_command):
    """Register the input target variable.

    NOTE(review): the INTEGER constraint is only applied when the
    variable was already seen; a first-time variable is merely recorded
    -- confirm this asymmetry is intended.
    """
    if not input_command.variable.name in self.variables:
        self.variables[input_command.variable.name] = input_command.variable
    else:
        self.evaluate_known(input_command.variable, Primitive("INTEGER", input_command.variable.line_number))
'''
Standard unification rules. self.return_variable contains the return variable from visiting
the actual function declaration, which can then be unified with the temporary representing
the return type of the function.
'''
def visit_function_declaration(self, function_declaration):
    """Unify the function name with a fresh signature t1->...->tn->tret.

    Fresh temporaries are created for each parameter plus one for the
    return type; visiting the definition sets self.return_variable
    (via visit_return_command), which is unified with the last slot.
    """
    signature = [self.get_temp(function_declaration.line_span, function_declaration.lex_span)
                 for parameter in range(len(function_declaration.definition.parameters) + 1)]
    self.unify(Variable(function_declaration.name, function_declaration.line_span,
                        function_declaration.lex_span),
               Function(signature, function_declaration.line_number))
    for parameter in range(len(function_declaration.definition.parameters)):
        self.unify(signature[parameter],
                   function_declaration.definition.parameters[parameter])
    function_declaration.definition.accept(self)
    self.unify(signature[len(signature) - 1], self.return_variable)
'''store return value for unification later.'''
def visit_return_command(self, return_command):
    """Remember the returned variable for visit_function_declaration."""
    self.return_variable = return_command.variable
def visit_function_definition(self, function_definition):
    """A definition only contributes the constraints of its body."""
    self.visit_command_block(function_definition.body)
def visit_command_block(self, command):
    """Walk the linked list of commands, visiting each node in order."""
    while command != None:
        command.accept(self)
        command = command.get_next_command()
def get_temp(self, line_span, lex_span):
    """Mint a fresh temporary type variable named _t0, _t1, ..."""
    fresh_name = "_t%d" % self.temp_count
    self.temp_count += 1
    return Variable(fresh_name, line_span, lex_span)
''' Due to the simplicity of Lingo the operator determines the types of
the operands, thus we take advantage of this.
Recursively check input types for the operators, then in the base
cases unify variables with their appropriate types.
@expression : expression whose type to check/infer
@t : type to check, None if the base call
'''
def evaluate_known(self, expression, t):
    """Check/infer the type of `expression` against the expected type `t`.

    In Lingo the operator determines its operand types, so binary
    expressions are checked recursively; at the leaves, variables are
    unified with the expected type.  `t` is None on the outermost call
    (no constraint yet).  Returns the decided Primitive where one is
    decided here; other branches work purely through unification side
    effects and fall through returning None.  self.error raises, so a
    failed check never returns normally.
    """
    # Recursively check operands of binary expression
    if isinstance(expression, BinaryExpression):
        if isinstance(expression.operator, ArithmeticOperator):
            if t != Primitive("INTEGER") and t != None:
                self.error("%s was found when integer was expected in the expression %s" % (t, expression), expression)
            else:
                self.evaluate_known(expression.left_term, Primitive("INTEGER", expression.left_term.line_number))
                self.evaluate_known(expression.right_term, Primitive("INTEGER", expression.right_term.line_number))
                return Primitive("INTEGER", expression.line_number)
        # NOTE(review): plain `if` (not elif) in the original -- kept as-is;
        # harmless since the operator cannot be both kinds.
        if isinstance(expression.operator, ComparisonOperator):
            if t != Primitive("BOOLEAN") and t != None:
                self.error("%s was found when boolean was expected in the expression %s" % (t, expression), expression)
            else:
                # Comparisons take INTEGER operands but produce a BOOLEAN.
                self.evaluate_known(expression.left_term, Primitive("INTEGER", expression.left_term.line_number))
                self.evaluate_known(expression.right_term, Primitive("INTEGER", expression.right_term.line_number))
                return Primitive("BOOLEAN", expression.line_number)
        elif isinstance(expression.operator, BooleanOperator):
            if t != Primitive("BOOLEAN") and t != None:
                self.error("%s was found when boolean was expected in the expression %s" % (t, expression), expression)
            else:
                self.evaluate_known(expression.left_term, Primitive("BOOLEAN", expression.left_term.line_number))
                self.evaluate_known(expression.right_term, Primitive("BOOLEAN", expression.right_term.line_number))
                return Primitive("BOOLEAN", expression.line_number)
    # t has type of the variable, add that to our type mapping, print an self.error message if type does not match
    elif isinstance(expression, DereferencedVariable):
        self.unify(expression, Reference(t, expression.line_number))
    elif isinstance(expression, ReferencedVariable):
        self.error("The referenced variable %s was found in a binary expression" % expression, expression)
    elif isinstance(expression, Variable):
        self.unify(expression, t)
    elif isinstance(expression, Number):
        if t != Primitive("INTEGER"):
            self.error("Number literal %s was found when %s was expected" % (expression, t), expression)
        else:
            return t
    elif isinstance(expression, Boolean):
        if t != Primitive("BOOLEAN"):
            self.error("Boolean literal %s found when %s was expected." % (expression, t), expression)
        else:
            return t
    else:
        self.error("Unknown case %s, or type checker bug encountered" % expression, expression)
def error(self, message, expr1, expr2=None):
    """Report a type error to stderr and abort with TypeException.

    Python 2 print-chevron syntax: this file is Python 2.  If
    annotate_types is set, the rename pass is run first so the
    partially annotated AST is still produced before raising.
    """
    if expr2 != None:
        print >> sys.stderr, message + " from lines %d and %d. " % (expr1.line_number, expr2.line_number)
    else:
        print >> sys.stderr, message + " at line %d. " % expr1.line_number
    if self.annotate_types:
        self.rename.visit_program(self.program)
    raise TypeException("Incorrectly typed program.")
'''
Visit a Program node in the AST
Either rename with function scope if rename is true, or rename to defaults if false.
Also if false, annotate types on the AST.
'''
class Rename:
    """Two-pass AST variable renamer.

    Pass 1 (self.rename == True): prefix every non-function variable
    name with its enclosing function scope so names are unique across
    functions.  Pass 2 (self.rename == False, flipped at the end of
    visit_program): strip the scope prefix again and annotate each
    variable node with its resolved type from the union-find map.
    """
    def __init__(self, variables):
        self.function_scope = ""   # current accumulated scope prefix, e.g. "f_g_"
        self.functions = []        # function names: never scope-prefixed
        self.variables = variables # shared union-find variable map
        self.rename = True         # True: add prefixes; False: remove + annotate

    def get_type(self, x):
        """Resolve x to a concrete type via the union-find map; None if unknown."""
        xBase = x  # Maintain if we find root and don't know type
        if isinstance(x, Variable):
            if x.name in self.variables:  # if encountered error, some variables were never checked
                x = self.find(self.variables[x.name])
            else:
                return None
        if isinstance(x, Primitive):
            return x
        elif isinstance(x, Reference):
            return Reference(self.get_type(x.value), 0)
        elif isinstance(x, Function):
            return Function([self.get_type(sig) for sig in x.signature], 0)
        return None  # Type not known

    def find(self, x):
        """Union-find root lookup with path compression."""
        if x.parent == x:
            return x
        else:
            x.parent = self.find(x.parent)
            return x.parent

    def visit_program(self, program):
        """Visit the whole program, then flip into pass-2 mode."""
        self.functions = [functionDeclaration.name for functionDeclaration in program.functions]
        for functionDeclaration in program.functions:
            functionDeclaration.accept(self)
        self.visit_command_block(program.command)
        self.rename = False  # the next visit_program call un-renames and annotates

    def visit_assignment_command(self, assignment_command):
        # visit to rename variables
        # Already assigned when FunctionCall was visited, do not rename!
        if isinstance(assignment_command.expression, FunctionReturn):
            return
        assignment_command.expression.accept(self)
        assignment_command.assigned_variable.accept(self)

    def visit_if_command(self, if_command):
        if_command.expression.accept(self)
        self.visit_command_block(if_command.true_block)
        self.visit_command_block(if_command.false_block)

    def visit_while_command(self, while_command):
        while_command.expression.accept(self)
        self.visit_command_block(while_command.loop_block)

    def visit_skip_command(self, skip_command):
        pass

    def visit_function_declaration(self, function_declaration):
        # Push a nested scope (outer scopes accumulate as prefixes).
        oldScope = self.function_scope
        self.function_scope = self.function_scope + function_declaration.name + "_"
        for parameter in range(len(function_declaration.definition.parameters)):
            function_declaration.definition.parameters[parameter].accept(self)
        function_declaration.definition.accept(self)
        self.function_scope = oldScope

    def visit_return_command(self, return_command):
        return_command.variable.accept(self)

    def visit_input_command(self, input_command):
        input_command.variable.accept(self)

    def visit_new(self, new):
        pass

    def visit_binary_expression(self, binary_expression):
        # Visit both sides for renaming
        binary_expression.left_term.accept(self)
        binary_expression.right_term.accept(self)

    def visit_function_call(self, function_call):
        # Rename the callee variable and every argument variable
        function_call.function_variable.accept(self)
        for parameter in function_call.parameter_variables:
            parameter.accept(self)

    def visit_function_return(self, function_return):
        pass

    def visit_function_definition(self, function_definition):
        self.visit_command_block(function_definition.body)

    def visit_variable(self, variable):
        if not self.rename:
            # Pass 2: attach the inferred type to the AST node.
            variable.type = copy.deepcopy(self.get_type(variable))
        if variable.name not in self.functions:  # function names stay unprefixed
            if self.rename:
                self.append_scope(variable)
            else:
                self.remove_scope(variable)

    def visit_referenced_variable(self, referenced_variable):
        if not self.rename:
            # Look up the type via a plain Variable wrapper of the same name.
            var = Variable(referenced_variable.name, referenced_variable.line_span,
                           referenced_variable.lex_span)
            referenced_variable.type = copy.deepcopy(self.get_type(var))
        if referenced_variable.name not in self.functions:
            if self.rename:
                self.append_scope(referenced_variable)
            else:
                self.remove_scope(referenced_variable)

    def visit_dereferenced_variable(self, dereferenced_variable):
        if self.rename:
            self.append_scope(dereferenced_variable)
        else:
            var = Variable(dereferenced_variable.name, dereferenced_variable.line_span,
                           dereferenced_variable.lex_span)
            dereferenced_variable.type = copy.deepcopy(self.get_type(var))
            self.remove_scope(dereferenced_variable)

    def visit_number(self, number):
        pass

    def visit_boolean(self, boolean):
        pass

    def visit_command_block(self, command):
        """Walk the linked list of commands, visiting each node."""
        while command != None:
            command.accept(self)
            command = command.get_next_command()

    def append_scope(self, variable):
        variable.name = self.function_scope + variable.name

    def remove_scope(self, variable):
        variable.name = variable.name[len(self.function_scope):]
|
[
"mkedlaya@cs.ucsb.edu"
] |
mkedlaya@cs.ucsb.edu
|
|
466fc433679f2ffd757047383ae1a1f4f49c622c
|
0fea5b92baacf23d89c2e1a218fc2b3a0e52cb8d
|
/python/Scraper/download_with_retry.py
|
faae0e727a2780f06cb177a061f9eb41db9328c0
|
[
"Apache-2.0"
] |
permissive
|
davidgjy/arch-lib
|
d0d426c97584e38371db53869878eedbf95e748a
|
b4402b96d2540995a848e6c5f600b2d99847ded6
|
refs/heads/master
| 2021-01-20T09:07:20.706972
| 2018-02-12T11:53:34
| 2018-02-12T11:53:34
| 90,223,697
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 454
|
py
|
import urllib.error
import urllib.request
def download(url, num_retries=2):
    """Download `url` and return the response body as bytes, or None.

    On URLError the error is reported and None is returned, except that
    5xx HTTP errors are retried recursively up to `num_retries` times
    (server errors are often transient; client errors are not retried).
    """
    print('Downloading:', url)
    try:
        html = urllib.request.urlopen(url).read()
    except urllib.error.URLError as e:
        # Fixes vs. original: URLError lives in urllib.error (urllib.URLError
        # does not exist), and the message used '%' with no format specifier,
        # which raised TypeError instead of printing the reason.
        print('Download error:', e.reason)
        html = None
        if num_retries > 0:
            if hasattr(e, 'code') and 500 <= e.code < 600:
                # recursively retry 5xx HTTP errors
                return download(url, num_retries - 1)
    return html
# Demo: fetch a page with up to 3 retries.  Guarded with __main__ so that
# importing this module no longer fires a network request as a side effect.
if __name__ == '__main__':
    url = 'http://www.baidu.com'
    print(download(url, 3))
|
[
"davidgjy@163.com"
] |
davidgjy@163.com
|
0efcb951cae29071b43f4b4e3e8a409bd16f1465
|
dd6067dee3f89ae8ceb7fec024b67842c7656281
|
/comparator.py
|
5501a46e1aca830045c0c4f79d9a18ad93a32334
|
[] |
no_license
|
JoanBas/CLUSTER
|
7d5b4876f522c0a932793e8c5c7ce29303ce9810
|
a0b7235f659d02b0aac6dc8bd4a04dd200da28e4
|
refs/heads/master
| 2021-01-11T05:49:37.306695
| 2017-06-21T17:03:14
| 2017-06-21T17:03:14
| 94,894,833
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,354
|
py
|
from MODEL_multi_combined import MODEL
import numpy as np
import multiprocessing as mtp
def two_options():
    """For each event pair, run 10 rounds x 2 mu factors x 100 simulations
    and write the per-round means to "<pair>.csv".  Python 2 code.

    NOTE(review): loop nesting reconstructed from a whitespace-mangled
    source -- verify levels against the original before relying on this.
    """
    event_pairs = [["views", "comments"], ["edits", "comments"]]
    for event_pair in event_pairs:
        total_means = []
        for i in range(10):
            print "RONDA", i
            mu_increased_factors = [1., 1.2]
            n_events_total = []
            for factor in mu_increased_factors:
                n_events = []
                model = MODEL(time=1, increased_duration_factor=1., event_kinds=event_pair, mu_increase_factor=factor)
                for j in range(100):
                    n_events.append(model.simulate(plot=0))
                n_events = np.asarray(n_events)
                n_events_total.append(n_events)
            means = []
            for ne in n_events_total:
                means.append(np.mean(ne, axis=0))
            total_means.append(means)
        print total_means
        # One CSV row per round: columns 0 and 2 of each factor's mean vector.
        file = open("_".join(event_pair) + ".csv", "wt")
        for means in total_means:
            towrite = []
            for factor_trial in means:
                towrite += [factor_trial[0], factor_trial[2]]
            print towrite
            file.write(",".join([str(val) for val in towrite]) + "\n")
        file.close()
def single(num, a):
    """Run the views/tools scenario under three mu-factor options and save
    per-option means/stds to CSVs suffixed by `num`.  Python 2 code.

    `a` is never used in the body (spawn-signature filler?  TODO confirm).
    """
    event_pair = ["views", "tools"]
    means = []
    stds = []
    for option in range(3):
        # Option selects which event kind gets the boosted (1.3) mu factor.
        if option == 0: mu_increased_factors = [1., 1.]
        elif option == 1: mu_increased_factors = [1.3, 1.]
        elif option == 2: mu_increased_factors = [1., 1.3]
        else: exit()
        total_means = []  # NOTE(review): assigned but never used afterwards
        model = MODEL(time=1, increased_duration_factor=1., event_kinds=event_pair,
                      mu_increase_factor=mu_increased_factors)
        n_events = []
        for i in range(50):
            print "RONDA ", i, "option ", option
            n_events.append(model.simulate(plot=0))
        n_events = np.asarray(n_events)
        print option, n_events
        mean = np.mean(n_events, 0)
        std = np.std(n_events, 0)
        print mean, std
        means.append(mean)
        stds.append(std)
    print means
    print stds
    np.savetxt("views_tools_mean" + str(num) + ".csv", np.asarray(means))
    np.savetxt("views_tools_stds" + str(num) + ".csv", np.asarray(stds))
# Spawn five worker processes, each running `single` with a distinct id.
# NOTE(review): not guarded by `if __name__ == '__main__'`; on platforms
# that use the spawn start method this re-executes the module in every
# child process -- confirm this only runs on fork-based platforms.
jobs = []
for num in range(5):
    p = mtp.Process(target=single, args=(num, 5))
    jobs.append(p)
    p.start()
|
[
"joan_bas@hotmail.com"
] |
joan_bas@hotmail.com
|
b322ea2d72b18ac419fd10def38fddbe0d30c1b5
|
3522a0e5806f9a4727ed542aab8f1eff610728c7
|
/src/store/models/Tag.py
|
8bdd285037ae9feda1cece5c858ea56bc0a267f2
|
[
"MIT"
] |
permissive
|
jswilson/range-tagger
|
f17cf82011cfef97cfec84522e9c0d70cc0e9fc0
|
0a60c7c44af5effb77ded80005bc7a066ca65c60
|
refs/heads/master
| 2020-07-07T03:43:43.542639
| 2019-09-02T22:05:28
| 2019-09-02T22:05:28
| 203,235,243
| 1
| 0
| null | 2019-09-02T17:25:38
| 2019-08-19T19:18:38
|
Python
|
UTF-8
|
Python
| false
| false
| 185
|
py
|
import uuid
class Tag:
    """A named tag identified by a random UUID.

    Equality is by id only: two tags with the same name are distinct.
    """

    def __init__(self, name):
        self.id = uuid.uuid4()
        self.name = name

    def __repr__(self):
        return 'Tag(id=%s, name=%r)' % (self.id, self.name)

    def __eq__(self, other):
        # Fix: comparing against a non-Tag used to raise AttributeError;
        # returning NotImplemented lets Python fall back gracefully.
        if not isinstance(other, Tag):
            return NotImplemented
        return str(self.id) == str(other.id)

    def __hash__(self):
        # Fix: defining __eq__ alone set __hash__ to None, making Tag
        # unusable in sets/dict keys.  Hash the same key equality uses.
        return hash(str(self.id))
|
[
"js.wilson@ymail.com"
] |
js.wilson@ymail.com
|
45149d5320d27687d7ff31975d14835cd619efa7
|
5d77833445b1ef95b5ca7b9a886f98cb38a16286
|
/code/9-12 TacotronDecoderwrapper.py
|
28ddda9aacb18edb2af96dfac848ac5941305610
|
[] |
no_license
|
wangbin0227/TensorFlow_Engineering_Implementation
|
bbafa4933c3244b65f0d3a2625fd58a9f8726c34
|
cb787e359da9ac5a08d00cd2458fecb4cb5a3a31
|
refs/heads/master
| 2023-03-18T10:58:58.916184
| 2021-03-16T15:03:49
| 2021-03-16T15:03:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,362
|
py
|
"""
@author: 代码医生工作室
@公众号:xiangyuejiqiren (内有更多优秀文章及学习资料)
@来源: <深度学习之TensorFlow工程化项目实战>配套代码 (700+页)
@配套代码技术支持:bbs.aianaconda.com (有问必答)
"""
import tensorflow as tf
from tensorflow.python.framework import ops, tensor_shape
from tensorflow.python.ops import array_ops, check_ops, rnn_cell_impl, tensor_array_ops
from tensorflow.python.util import nest
from tensorflow.contrib.seq2seq.python.ops import attention_wrapper
attention = __import__("9-11 attention")
LocationSensitiveAttention = attention.LocationSensitiveAttention
class TacotronDecoderwrapper(tf.nn.rnn_cell.RNNCell):
    """Tacotron decoder cell: wraps an RNN cell with a pre-net,
    location-sensitive attention, and frame / stop-token projections.
    (TensorFlow 1.x / tf.contrib code; Chinese comments translated.)
    """
    # Initialization
    def __init__(self, encoder_outputs, is_training, rnn_cell, num_mels, outputs_per_step):
        super(TacotronDecoderwrapper, self).__init__()
        self._training = is_training
        self._attention_mechanism = LocationSensitiveAttention(256, encoder_outputs)  # [N, T_in, attention_depth=256]
        self._cell = rnn_cell
        self._frame_projection = tf.keras.layers.Dense(units=num_mels * outputs_per_step, name='projection_frame')  # [N, T_out/r, M*r]
        # [N, T_out/r, r]
        self._stop_projection = tf.keras.layers.Dense(units=outputs_per_step, name='projection_stop')
        self._attention_layer_size = self._attention_mechanism.values.get_shape()[-1].value
        self._output_size = num_mels * outputs_per_step  # define the output size

    def _batch_size_checks(self, batch_size, error_message):
        # Assert the requested batch size matches the attention memory's.
        return [check_ops.assert_equal(batch_size, self._attention_mechanism.batch_size,
                                       message=error_message)]

    @property
    def output_size(self):
        return self._output_size

    #@property
    # NOTE(review): the @property decorator is commented out, so unlike the
    # stock RNNCell API this is a *method* -- confirm callers invoke it.
    def state_size(self):
        """Size of the returned state (modeled on AttentionWrapper)."""
        return tf.contrib.seq2seq.AttentionWrapperState(
            cell_state=self._cell._cell.state_size,
            time=tensor_shape.TensorShape([]),
            attention=self._attention_layer_size,
            alignments=self._attention_mechanism.alignments_size,
            alignment_history=(),  #)#,
            attention_state=())

    def zero_state(self, batch_size, dtype):
        """Return an all-zero initial state (modeled on AttentionWrapper)."""
        with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
            cell_state = self._cell.zero_state(batch_size, dtype)
            error_message = (
                "When calling zero_state of TacotronDecoderCell %s: " % self._base_name +
                "Non-matching batch sizes between the memory "
                "(encoder output) and the requested batch size.")
            with ops.control_dependencies(
                    self._batch_size_checks(batch_size, error_message)):
                cell_state = nest.map_structure(
                    lambda s: array_ops.identity(s, name="checked_cell_state"),
                    cell_state)
            return tf.contrib.seq2seq.AttentionWrapperState(
                cell_state=cell_state,
                time=array_ops.zeros([], dtype=tf.int32),
                attention=rnn_cell_impl._zero_state_tensors(self._attention_layer_size, batch_size, dtype),
                alignments=self._attention_mechanism.initial_alignments(batch_size, dtype),
                alignment_history=tensor_array_ops.TensorArray(dtype=dtype, size=0, dynamic_size=True),
                attention_state=tensor_array_ops.TensorArray(dtype=dtype, size=0, dynamic_size=True)
            )

    def __call__(self, inputs, state):
        # Takes this step's ground-truth output y plus the decoder state from
        # the previous step; together they predict the next step.
        drop_rate = 0.5 if self._training else 0.0  # set dropout rate
        # Pre-net: preprocess the inputs (dense 256 -> dense 128, each with dropout).
        with tf.variable_scope('decoder_prenet'):  # [N, T_in, prenet_depths[-1]=128]
            for i, size in enumerate([256, 128]):
                dense = tf.keras.layers.Dense(units=size, activation=tf.nn.relu, name='dense_%d' % (i + 1))(inputs)
                inputs = tf.keras.layers.Dropout(rate=drop_rate, name='dropout_%d' % (i + 1))(dense, training=self._training)
        # Concatenate the attention context feature onto the pre-net output.
        rnn_input = tf.concat([inputs, state.attention], axis=-1)
        # Apply a dense transform, then feed the decoder RNN.
        rnn_output, next_cell_state = self._cell(tf.keras.layers.Dense(256)(rnn_input), state.cell_state)
        # Compute attention for this step; state.alignments holds the
        # previous cumulative alignments.
        context_vector, alignments, cumulated_alignments = attention_wrapper._compute_attention(
            self._attention_mechanism, rnn_output, state.alignments, None)
        # Record the alignment history (same as the original AttentionWrapper).
        alignment_history = state.alignment_history.write(state.time, alignments)
        # Build this step's wrapper state.
        next_state = tf.contrib.seq2seq.AttentionWrapperState(
            time=state.time + 1,
            cell_state=next_cell_state, attention=context_vector,
            alignments=cumulated_alignments, alignment_history=alignment_history,
            attention_state=state.attention_state)
        # Concat decoder output with the attention context as the final
        # projection input.
        projections_input = tf.concat([rnn_output, context_vector], axis=-1)
        # Two dense layers predict the next frames and the <stop_token> flag.
        cell_outputs = self._frame_projection(projections_input)  # mel features for the next outputs_per_step frames
        stop_tokens = self._stop_projection(projections_input)
        if self._training == False:
            stop_tokens = tf.nn.sigmoid(stop_tokens)
        return (cell_outputs, stop_tokens), next_state
|
[
"aianaconda@qq.com"
] |
aianaconda@qq.com
|
035b1c30c5a811cf38181ee197300dfdf2e01e7d
|
e9ba199fd5632e647183664ac5892a22251329d1
|
/Curso/Mundo 2/Desafio037.py
|
b1052623fc091f464879211ecf3b5387eba970ae
|
[] |
no_license
|
Igor-Ferraz7/CursoEmVideo-Python
|
625aa694aa8e6d0d93d37a732dd1412097a735e8
|
e59d7abec5fb69b5c96999701d641054360d5ade
|
refs/heads/master
| 2023-04-30T14:28:43.532616
| 2021-05-05T23:56:43
| 2021-05-05T23:56:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 811
|
py
|
# Interactive base converter (prompts and output are in Portuguese).
# ANSI escape codes for coloured terminal output:
br = '\033[30m'    # black
vm = '\033[4;31m'  # underlined red
vd = '\033[32m'    # green
a = '\033[33m'     # yellow
az = '\033[34m'    # blue
r = '\033[35m'     # magenta
ci = '\033[36m'    # cyan (unused below)
des = '\033[m'     # reset
cvs = '\033[36mConversor\033[30m'
# Header banner, '='-padded and centred to 78 columns.
print(f'{br}{cvs:=^78}')
# Read the integer to convert.
n = int(input(f'\033[4;36mDigite o número inteiro{des}: '))
bi = bin(n)  # '0b...' binary form
oc = oct(n)  # '0o...' octal form
he = hex(n)  # '0x...' hexadecimal form
print(f'{br}[1] {a}Binário \n{br}[2] {r}Octal\n{br}[3] {az}Hexadecimal')
c = int(input(f'\033[4;36mEscolha um deles para fazer a conversão{des}: '))
# [2:] strips the '0b'/'0o'/'0x' prefix from each representation.
if c == 1:
    print(f'{a}O valor {vd}{n}{a} em binário é{des}: {vd}{bi[2:]}')
elif c == 2:
    print(f'{r}O valor {vd}{n}{r} em octal é{des}: {vd}{oc[2:]}')
elif c == 3:
    print(f'{az}O valor {vd}{n}{az} em hexadecimal é{des}: {vd}{he[2:]}')
else:
    print(f'{vm}- Opção inválida. Tente novamente{des}{br}.')
fim = '\033[36mFIM\033[30m'
print(f'{des}{br}{fim:=^78}')
|
[
"igorsousaferrazaraga2@gmail.com"
] |
igorsousaferrazaraga2@gmail.com
|
ef26e4572a36ca6d9678ccc256904ec09d6b3688
|
2cff704d26840ca5d4f543c30acf0beb6edadda5
|
/rig/exp_functions.py
|
c09f6b3b7190adb8e432a2e53d1fa1f93707da6c
|
[
"MIT"
] |
permissive
|
tgbugs/mlab
|
ff7b8ae366bb6abf5b71e39f020cc80b4079e774
|
dacc1663cbe714bb45c31b1b133fddb7ebcf5c79
|
refs/heads/master
| 2020-04-09T10:33:24.335267
| 2016-05-03T23:18:33
| 2016-05-03T23:18:33
| 12,688,098
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,126
|
py
|
import re
import datetime
import inspect as ins
from sys import stdout
from time import sleep
from debug import TDB,ploc
try:
import rpdb2
except:
pass
tdb=TDB()
printD=tdb.printD
printFD=tdb.printFuncDict
tdbOff=tdb.tdbOff
#file to consolidate all the different functions I want to execute using the xxx.Control classes
#TODO this file needs a complete rework so that it can pass data to the database AND so that it can be used by keyboard AND so that it can be used by experiment scripts... means I may need to split stuff up? ;_;
#TODO rig control vs experiment control... these are technically two different 'modes' one is keyboard controlled the other is keyboard initiated...
#TODO ideally I want to do experiments the same way every time instead of allowing one part here and another there which is sloppy so those are highly ordered...
#TODO BUT I need a way to fix things, for example if the slice moves and I need to recalibrate the slice position (FUCK, how is THAT going to work out in metadata)
#TODO all of these are configured for terminal output only ATM, ideally they should be configged by whether they are called from keyboard or from experiment... that seems... reasonable??! not very orthogonal...
#mostly because when I'm running an experiment I don't want to accientally hit something or cause an error
#TODO split in to send and recieve?!?
#TODO datasource/expected datasource mismatch
class kCtrlObj:
    """key controller object

    Base class for keyboard-driven controller wrappers: caches the
    shared mode-state callbacks and tries to start the hardware
    controller, falling back to a stub when it is unavailable.
    """
    def __init__(self, modestate, controller=lambda: None):
        self.charBuffer = modestate.charBuffer
        self.keyHandler = modestate.keyHandler
        # I probably do not need to pass key handler to thing outside of inputManager...
        # yep, not used anywhere, but I supose it could be used for submodes... we'll leave it in
        self.setMode = modestate.setMode
        self.updateModeDict = modestate.updateModeDict
        self.__mode__ = self.__class__.__name__  # mode key is the subclass name
        self.keyThread = modestate.keyThread
        self.ikCtrlDict = modestate.ikCtrlDict
        self.controller = controller
        self.initController(self.controller)

    def reloadControl(self):  # this wont work because it wont write or something....
        """Tear down and re-create the controller (debug helper)."""
        printD('reiniting controller')
        rpdb2.setbreak()
        try:
            self.ctrl.cleanup()
            del(self.ctrl)
            from mcc import mccControl
            # NOTE(review): `Control` is not defined in this scope -- this
            # line will NameError; probably meant mccControl.  Left as-is.
            self.ctrl = Control()
            self.ikCtrlDict[self.__mode__] = self
            self.updateModeDict()
        except:
            printD('FAILURE')
            raise IOError
        return self

    def initController(self, controller):
        """Instantiate `controller`; on failure install a stub and continue."""
        try:
            self.ctrl = controller()
            print('[OK]', controller.__name__, 'started')
        except:
            print('[!] **LOAD ERROR**', controller.__name__, 'not started, will listen for start')
            self.ctrl = lambda: None  # placeholder until a real controller appears
            from threading import Thread
            #self.pollThrd=Thread(target=self.pollForCtrl,args=(controller,))
            #self.pollThrd.start()
        # NOTE(review): registration assumed to run on both success and
        # failure paths (source indentation was mangled) -- confirm.
        self.ikCtrlDict[self.__mode__] = self
        self.updateModeDict()

    def pollForCtrl(self, controller):  # FIXME maybe we SHOULD do this here since these are more tightly integrated with modestate
        """Retry controller startup every 2 s while the key thread lives."""
        while self.keyThread.is_alive():
            try:
                self.ctrl = controller()
                printD(self)
                print('[OK]', controller.__name__, 'started')
                #printD(self.__mode__)
                #self.ikCtrlDict[self.__mode__]=self
                self.updateModeDict()
                break
            except:
                sleep(2)

    def wrapDoneCB(self):
        """Rebind every non-excluded bound method so self.doneCB runs
        before each call (pre-hook; the post-hook defaults to a no-op)."""
        class wrap:
            def __init__(self, call, pre=lambda: None, post=lambda: None):
                self.start = pre
                self.do = call
                self.stop = post
            def go(self, *args):
                #printD('wat')
                self.start()
                out = self.do(*args)
                self.stop()
                return out
        excluded = ['cleanup', '__init__', 'doneCB', 'readProgDict', 'updateModeDict', 'setMode']
        mems = ins.getmembers(self)
        funcs = [func for func in mems if ins.ismethod(func[1]) and func[0] not in excluded]
        #printFD(funcs)
        for tup in funcs:
            setattr(self, tup[0], wrap(tup[1], self.doneCB).go)

    def cleanup(self):
        # Subclasses override with real teardown.
        pass
class clxFuncs(kCtrlObj):
    """Controller wrapper around clx.clxControl (acquisition software).

    NOTE(review): nesting reconstructed from a whitespace-mangled source.
    """
    def __init__(self, modestate):
        from clx import clxControl
        super().__init__(modestate, clxControl)
        #self.initController(clxmsg)
        #printD('clx ctrl',self.ctrl)
        #self.clxCleanup=self.cleanup
        self.programDict = {}  # key -> protocol file path (set via readProgDict)
        #self.wrapDoneCB()

    #class only
    def readProgDict(self, progDict):
        """Install the key -> protocol-path mapping used by load()."""
        self.programDict = progDict
        return self

    def cleanup(self):
        super().cleanup()
        try:
            self.ctrl.DestroyObject()
            print(self.ctrl.__class__, 'handler destroyed')
        except:
            pass
        #print('this this works the way it is supposed to the we should never have to destory the object')

    #input with output
    def getStatus(self, outputs):  # TODO outputs... should be able to output to as many things as I want... probably should be a callback to simplify things elsewhere? no?!?!
        """Query controller status and print it; `outputs` is unused."""
        status = self.ctrl.GetStatus()
        print(status)
        return self

    def load(self, key=None):
        """Load the protocol mapped to `key`, prompting for a key if absent."""
        if not key:
            print('Please enter the program to load')
            self.keyHandler(1)
            key = self.charBuffer.get()
        try:
            path = self.programDict[key]
            #printD(path)
            self.ctrl.LoadProtocol(path.encode('ascii'))
        except:
            print('Program not found')
            raise

        return self

    #input only
    def startMembTest(self):
        """Start the membrane test on ids 120 and 121 (device-specific
        channel identifiers -- TODO confirm meaning)."""
        self.ctrl.StartMembTest(120)
        self.ctrl.StartMembTest(121)
        return self
class datFuncs(kCtrlObj):
    # interface with the database TODO this should be able to run independently?
    """Put ANYTHING permanent that might be data in here"""
    def __init__(self, modestate):
        #from database.models import * #DAMNIT FIXME
        super().__init__(modestate)  # no controller: uses the default stub
        self.markDict = {}
        self.posDict = {}
        self.MCCstateDict = {}
        #self.wrapDoneCB()
        self.updateModeDict()

    # FIXME
    # this class should be the one to get data out of dataman
    # dataman should have a method 'saveData' that takes the source class (self) and the data and stores it
    # The methods below are placeholder stubs: they only return self so
    # they can be chained once implemented.
    def newExperiment(self):
        return self

    def newCell(self):
        return self

    def newSlice(self):
        return self

    def addMetaData(self):
        return self

    def addDataFile(self):  # FIXME not sure this should go here...
        return self

    def getUserInputData(self):
        """Sadly there is still some data that I can't automatically collect"""
        # get cell depths FROM SAME STARTING POINT??? measure this before expanding tissue with internal???
        return self
class mccFuncs(kCtrlObj):  # FIXME add a way to get the current V and I via... telegraph?
    """MultiClamp Commander (MCC) controller wrapper.

    The all*/test*/zero*/one* methods are canned amplifier
    configurations.  Mode codes appear to be 0=VC, 1=IC, 2=I=0 and
    holding values like -.06/-.075 presumably volts (-60/-75 mV) --
    TODO confirm both against the MCC API documentation.
    """
    def __init__(self, modestate):
        from mcc import mccControl
        super().__init__(modestate, mccControl)  # FIXME this needs better error messages
        #self.initController(mccmsg)
        self.MCCstateDict = {}  # utc timestamp -> list of per-channel state lists
        #self.wrapDoneCB()
        self.updateModeDict()
        # associated metadata sources
        self.state1DataSource = None

    def inpWait(self):
        """Block until any key is pressed."""
        # wait for keypress to move to the next program, this may need to spawn its own thread?
        print('HIT ANYTHING TO ADVANCE! (not the dog, that could end poorly)')
        self.keyHandler(1)
        self.charBuffer.get()
        return self

    def getState(self):  # FIXME this function and others like it should probably be called directly by dataman?
        """Snapshot every amplifier channel's settings into MCCstateDict,
        keyed by the current UTC timestamp."""
        printD('hMCCmsg outer', self.ctrl.hMCCmsg)
        def base():
            # settings common to all modes; appends to `state` via closure
            state.append(self.ctrl.GetHoldingEnable())
            state.append(self.ctrl.GetHolding())
            state.append(self.ctrl.GetPrimarySignal())
            state.append(self.ctrl.GetPrimarySignalGain())
            state.append(self.ctrl.GetPrimarySignalLPF())
            state.append(self.ctrl.GetPipetteOffset())
        def vc():
            # mode 0: adds capacitance-compensation settings
            base()
            state.append(self.ctrl.GetFastCompCap())
            state.append(self.ctrl.GetSlowCompCap())
            state.append(self.ctrl.GetFastCompTau())
            state.append(self.ctrl.GetSlowCompTau())
            state.append(self.ctrl.GetSlowCompTauX20Enable())
        def ic():
            # mode 1: adds bridge-balance settings
            base()
            state.append(self.ctrl.GetBridgeBalEnable())
            state.append(self.ctrl.GetBridgeBalResist())
        def iez():
            # mode 2: common settings only
            base()
        modeDict = {0: vc, 1: ic, 2: iez}
        stateList = []
        for i in range(self.ctrl.mcNum):
            self.ctrl.selectMC(i)
            state = []  # FIXME: make this a dict with keys as the name of the value? eh would probs complicate
            state.append(i)  # might be suprflulous but it could simplify the code to read out stateList
            mode = self.ctrl.GetMode()
            state.append(mode)
            modeDict[mode]()  # dispatch on mode; fills `state`
            stateList.append(state)
            print(state)
        self.MCCstateDict[datetime.datetime.utcnow()] = stateList
        return self

    def printMCCstate(self):
        """Dump the recorded state dict, one tuple per line."""
        print(re.sub('\), ', ')\r\n', str(self.MCCstateDict)))
        return self

    def setMCState(self, MC=None, Mode=None, Holding=None, HoldingEnable=None):  # TODO
        """Unimplemented stub for setting an arbitrary channel state."""
        # FIXME all of the experiment logic needs to be stored in one place instead of hidden in 10 files
        # selectMC,SetMode,SetHolding,SetHoldingEnable,
        #self.ctrl.selectMC()
        return self

    def allIeZ(self):
        """Both channels to mode 2."""
        self.ctrl.selectMC(0)
        self.ctrl.SetMode(2)
        self.ctrl.selectMC(1)
        self.ctrl.SetMode(2)
        return self

    def allVCnoHold(self):
        """Both channels to mode 0, holding disabled."""
        #try:
        self.ctrl.selectMC(0)
        self.ctrl.SetMode(0)
        self.ctrl.SetHoldingEnable(0)
        self.ctrl.selectMC(1)
        self.ctrl.SetMode(0)
        self.ctrl.SetHoldingEnable(0)
        #except:
        #raise BaseException
        return self

    def allVChold_60(self):
        """Both channels to mode 0, holding at -.06, holding enabled."""
        self.ctrl.selectMC(0)
        self.ctrl.SetMode(0)
        self.ctrl.SetHolding(-.06)
        self.ctrl.SetHoldingEnable(1)
        self.ctrl.selectMC(1)
        self.ctrl.SetMode(0)
        self.ctrl.SetHolding(-.06)
        self.ctrl.SetHoldingEnable(1)
        return self

    def allICnoHold(self):
        """Both channels to mode 1, holding disabled."""
        self.ctrl.selectMC(0)
        self.ctrl.SetMode(1)
        self.ctrl.SetHoldingEnable(0)
        self.ctrl.selectMC(1)
        self.ctrl.SetMode(1)
        self.ctrl.SetHoldingEnable(0)
        return self

    def testZtO_75(self):
        """Channel 0: mode 1 no hold; channel 1: mode 0 holding -.075."""
        self.ctrl.selectMC(0)
        self.ctrl.SetMode(1)
        self.ctrl.SetHoldingEnable(0)
        self.ctrl.selectMC(1)
        self.ctrl.SetMode(0)
        self.ctrl.SetHolding(-.075)
        self.ctrl.SetHoldingEnable(1)
        return self

    def testOtZ_75(self):
        """Mirror of testZtO_75: channel 0 holds at -.075, channel 1 free."""
        self.ctrl.selectMC(0)
        self.ctrl.SetMode(0)
        self.ctrl.SetHolding(-.075)
        self.ctrl.SetHoldingEnable(1)
        self.ctrl.selectMC(1)
        self.ctrl.SetMode(1)
        self.ctrl.SetHoldingEnable(0)
        return self

    def zeroVChold_60(self):
        """Channel 0 only: mode 0, holding -.06, enabled."""
        self.ctrl.selectMC(0)
        self.ctrl.SetMode(0)
        self.ctrl.SetHolding(-.06)
        self.ctrl.SetHoldingEnable(1)
        return self

    def oneVChold_60(self):
        """Channel 1 only: mode 0, holding -.06, enabled."""
        self.ctrl.selectMC(1)
        self.ctrl.SetMode(0)
        #self.ctrl.poops(1) #awe, this is broken now due to something
        self.ctrl.SetHolding(-.06)
        self.ctrl.SetHoldingEnable(1)
        return self

    def cleanup(self):
        """Destroy the controller's message-handler object on shutdown."""
        super().cleanup()
        try:
            self.ctrl.DestroyObject()
            print(self.ctrl.__class__, 'handler destroyed')
        except:
            pass
class espFuncs(kCtrlObj):
def __init__(self, modestate):
from esp import espControl
super().__init__(modestate,espControl)
self.markDict={} #FIXME
self.posDict={} #FIXME
#self.initController(npControl)
self.updateModeDict()
self.modestate=modestate
self.setMoveDict()
#self.event=modestate.event
#associated metadatasources:
self.EspXDataSource=None
self.EspYDataSource=None
def getPos(self):
#may want to demand a depth input (which can be bank)
#try:
pos=self.ctrl.getPos()
#self.doneCB()
self.posDict[datetime.datetime.utcnow()]=pos #FIXME dat should handle ALL of this internally
print(pos)
#except:
#printD('oops')
#raise
return self
def setPos(self,x,y):
self.ctrl.setPos((x,y)) #FIXME may need BsetPos
def cleanup(self):
super().cleanup()
self.ctrl.cleanup()
return self
def main():
esp=espFuncs(None,None,None,None)
#mcc=mccFuncs(None,None,None,None)
if __name__=='__main__':
main()
|
[
"tgbugs@gmail.com"
] |
tgbugs@gmail.com
|
eede51ebc331fa51514c6f113c817b1613668960
|
ba7c9471429123c740bbc0b146245f5d1ce427ca
|
/blog/users/utils.py
|
88c730a5e98dfd6c3b278bc1113c8793b9dc2027
|
[] |
no_license
|
pcoffey/Flask_Blog
|
37ac9cf4bd06e0cec44179e579760760c5d966e8
|
5941da349318ee7a7014f209a23c1be198573d3e
|
refs/heads/master
| 2020-03-24T19:56:10.683092
| 2018-08-02T02:05:53
| 2018-08-02T02:05:53
| 142,951,164
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 977
|
py
|
import os
import secrets
from PIL import Image
from flask import url_for, current_app
from flask_mail import Message
from blog import mail
def save_picture(form_picture):
random_hex = secrets.token_hex(8)
_, f_ext = os.path.splitext(form_picture.filename)
picture_fn = random_hex + f_ext
picture_path = os.path.join(current_app.root_path, 'static/profile_pics', picture_fn)
output_size = (125, 125)
i = Image.open(form_picture)
i.thumbnail(output_size)
i.save(picture_path)
return picture_fn
def send_reset_email(user):
token = user.get_reset_token()
msg = Message('Password Reset Request',
sender='noreply@demo.com',
recipients=[user.email])
msg.body = f'''To reset your password, visit the following link:
{url_for('users.reset_token', token=token, _external=True)}
if you did not make this request then simply ignore this email and no change will be applied
'''
mail.send(msg)
|
[
"pcoffey2@gmail.com"
] |
pcoffey2@gmail.com
|
a9e2383f7e2a435a177bc299495f2ad72c71be62
|
8d5f3ec2d50f1cb7d694a1016105bcf37b3dc829
|
/distinctcharacters.py
|
d0fe31912d0f98b3ae56ee7d51bdff55635bb86c
|
[] |
no_license
|
SushantSriv/CODECHEF_python-codes
|
acbbabb33d1481d32f3b70b517927631703fa43f
|
a4524356e8d19ba1206e1688f9e307c7d462c213
|
refs/heads/master
| 2021-09-21T15:32:42.777321
| 2018-08-28T15:33:49
| 2018-08-28T15:33:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 79
|
py
|
for test in range(int(input())):
s=input()
print(len(list(set(s))))
|
[
"noreply@github.com"
] |
noreply@github.com
|
5e44160023e022affc7fdd6e109324020be79d42
|
da5ada14fae42304263d1e36ca8b8eeda289fe0a
|
/class_ex/rest_api/rest_modify.py
|
4f4514f7020cc8654efa9927ddcc25cfa9d0a248
|
[] |
no_license
|
abhinav2938/Kirk_python-course
|
8588ba671f4c98a9adeeca783885d6fa2706909a
|
0bde7a2b09c67c628a9fc78dac773ec2603ae249
|
refs/heads/main
| 2023-01-22T18:13:10.668803
| 2020-11-25T18:20:11
| 2020-11-25T18:20:11
| 303,238,515
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,258
|
py
|
import requests
import json
import os
from pprint import pprint
from urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(category= InsecureRequestWarning)
if __name__ == '__main__':
#token = '63aa375e2590159ca3171c5269931043b85d33cf'
token = os.environ['NETBOX_TOKEN']
url = 'https://netbox.lasthop.io/api/dcim/devices/8/'
#url = 'https://api.github.com/'
http_headers = {'accept': 'application/json; version=2.4;',
'authorization' : 'Token {}'.format(token),
}
response = requests.get(url, headers = http_headers, verify = False)
arista10 = response.json()
#Now doing PUT operation with new http_headers
http_headers = {'Content-Type' : 'application/json; version=2.4;',
'authorization' : 'Token {}'.format(token),
}
#Reformat to modify the arista10 object
for field in ['device_role', 'device_type', 'platform' ,'site' , 'rack']:
arista10[field] = arista10[field]['id']
arista10['status'] = 1
arista10['rack'] = 2
response = requests.put(url, headers = http_headers, data = json.dumps(arista10), verify = False)
response = response.json()
print()
pprint(response)
print()
|
[
"abhinavkumar2938@gmail.com"
] |
abhinavkumar2938@gmail.com
|
52af385f78f4d9e578bfe1d8e848885d90d97bd9
|
3adf57ec6763d53e2e762e67244dafb8553e86ec
|
/zoara_model_build_val.py
|
e11494aede0a40f4f87639ea38859ba9739d7886
|
[] |
no_license
|
baronessvonblixen/bonfire-of-vanities
|
d7b330157092c2ccd2b9416be69cedcae9fc982a
|
622cac439a0917f0551891823a96404f452e0dae
|
refs/heads/master
| 2020-09-20T09:53:34.159053
| 2020-03-11T15:25:38
| 2020-03-11T15:25:38
| 224,442,901
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,967
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 30 16:49:31 2020
@author: meghan
"""
start with importing data set as a dataframe in pandas
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import matplotlib
df_stalking_complete = pd.read_csv("/Users/meghan/Desktop/2006STALKINGSUPPLEMENT/DS0005/20080-0005-Data.tsv",sep='\t')
print(df_stalking_complete)
#count the number of occurances for attacks or attack attempts
# 1=yes, 2=no, 8=residue, 9=out of universe
attack_cols = ['S0156', 'S0157', 'S0158', 'S0159', 'S0160', 'S0161', 'S0162', 'S0163', 'S0164', 'S0165']
attack_df = df_stalking_complete[attack_cols]
attack_df.apply(pd.Series.value_counts)
#count the number of occurances for injuries sustained
# 1=yes, 0=no, 9=out of universe
inj_cols = ['S0178','S0179', 'S0180', 'S0181', 'S0182', 'S0183', 'S0184', 'S0185', 'S0186', 'S0187']
inj_df = df_stalking_complete[inj_cols]
inj_df.apply(pd.Series.value_counts)
#count the number of occurances for property damage
# 1=yes, 0=no, 9=out of universe
prop_cols = ['S0153','S0154', 'S0155']
prop_df = df_stalking_complete[prop_cols]
prop_df.apply(pd.Series.value_counts)
#create a column that indicates escalation or not
merge_attack_prop_df = pd.concat([attack_df, prop_df],axis=1)
merge_attack_prop_df
#create a column that indicates escalation or not
#sum indicates number of unique escalation cases
escalation_list = []
for row in merge_attack_prop_df.iterrows():
if 1 in row[1].values:
escalation_list.append(1)
else:
escalation_list.append(0)
sum(escalation_list)
attack_list=[]
prop_list=[]
for row in attack_df.iterrows():
if 1 in row[1].values:
attack_list.append(1)
else:
attack_list.append(0)
for row in prop_df.iterrows():
if 1 in row[1].values:
prop_list.append(1)
else:
prop_list.append(0)
print(sum(attack_list))
print(sum(prop_list))
#clean data frame so that there are binary "1"s for affirmative responses and nothing else
df_clean_w_indicators = df_stalking_complete.where(lambda x:x==1, other=0)
#df_clean
#sum(df_clean['S0156'] )
#remove indicators from predictors
df_clean = df_clean_w_indicators.drop(['S0166', 'S0167', 'S0177','S0156', 'S0157', 'S0158', 'S0159', 'S0160', 'S0161', 'S0162', 'S0163', 'S0164', 'S0165', 'S0153','S0154', 'S0155', 'S0176', 'S0175', 'S0178','S0179', 'S0180', 'S0181', 'S0182', 'S0183', 'S0184', 'S0185', 'S0186', 'S0187'], axis=1)
print(df_clean)
#append the value in escalation_df to the end of the complete data set
#first make the list a dataframe
escalation_df = pd.DataFrame(escalation_list)
complete_w_escalation_df = pd.concat([df_clean, escalation_df],axis=1)
complete_w_escalation_df.rename(columns={0:'ESCAL'}, inplace=True)
complete_w_escalation_df
id_as_stalk = sum(complete_w_escalation_df['S0352'])
print(id_as_stalk)
complete_sort_by_incd = complete_w_escalation_df.sort_values(by=['S0352'], ascending=False)
pos_incd_only_df = complete_sort_by_incd[1:729]
no_incd_only_df = complete_sort_by_incd[730:78741]
print(sum(pos_incd_only_df['ESCAL']))
print(sum(no_incd_only_df['ESCAL']))
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn import metrics
X = pos_incd_only_df.drop(['ESCAL'], axis=1)
y = pos_incd_only_df['ESCAL']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20)
clf_train = LogisticRegression(random_state=0).fit(X_train,y_train)
y_pred = clf_train.predict(X_test)
y_pred_proba = clf_train.predict_proba(X_test)[::,1]
fpr, tpr, _ = metrics.roc_curve(y_test, y_pred_proba)
auc = metrics.roc_auc_score(y_test, y_pred_proba)
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot(fpr,tpr,label="data 1, auc="+str(auc))
plt.legend(loc=4)
score = clf_train.score(X_test, y_test)
print(score)
cfm = metrics.confusion_matrix(y_test, y_pred)
cfm
A = pos_incd_only_df.drop(['ESCAL'], axis=1)
b = pos_incd_only_df['ESCAL']
A_challenge = no_incd_only_df.drop(['ESCAL'], axis=1)
b_challenge = no_incd_only_df['ESCAL']
clf_train = LogisticRegression(random_state=0).fit(A,b)
b_pred = clf_train.predict(A_challenge)
score = clf_train.score(A_challenge, b_challenge)
print(score)
b_pred_proba = clf_train.predict_proba(A_challenge)[::,1]
fpr, tpr, _ = metrics.roc_curve(b_challenge, b_pred_proba)
auc = metrics.roc_auc_score(b_challenge, b_pred_proba)
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot(fpr,tpr,label="data 1, auc="+str(auc))
plt.legend(loc=4)
cfm = metrics.confusion_matrix(b_challenge, b_pred)
print(cfm)
coefficients = clf_train.coef_
print(coefficients)
print(coefficients.type)
features_id = list(zip(coefficients[0], A.columns))
from sklearn.feature_selection import RFE
rfe = RFE(clf_train, 25)
fit = rfe.fit(A, b)
print("Num Features: %d" % fit.n_features_)
print("Selected Features: %s" % fit.support_)
#print("Feature Ranking: %s" % fit.ranking_)
selected_features_boolean_df = pd.DataFrame(fit.support_)
features_id_df = pd.DataFrame(features_id)
features_ranking = pd.concat([features_id_df, selected_features_boolean_df], axis=1)
features_ranking.columns= ['coef', 'code', 'bool']
features_ranking_sort = features_ranking.sort_values(by= ['bool','coef'], ascending= [0,1])
print(features_ranking_sort.head(26))
#make data sets for 10,15, 20 questionaire queries
#run model and compare on these sets
key_features_twenty = ['S0097', 'S0196', 'S0266', 'S0237', 'S0250', 'S0284', 'S0006', 'S0126', 'S0190', 'S0206', 'S0195', 'S0088', 'S0340', 'V2041', 'S0333', 'S0300', 'S0026', 'V2091', 'S0018', 'S0079']
key_features_fifteen = ['S0097', 'S0196', 'S0266', 'S0237', 'S0250', 'S0284', 'S0006', 'S0190', 'S0206', 'S0195', 'S0194', 'S0088', 'S0340', 'V2041', 'S0333']
key_features_ten = ['S0097', 'S0196', 'S0266', 'S0237', 'S0250', 'S0284', 'S0006', 'S0190', 'S0206', 'S0195']
df_twenty_queries_data_incident = pos_incd_only_df[key_features_twenty]
df_twenty_queries_data_noincident = no_incd_only_df[key_features_twenty]
df_fifteen_queries_data_incident = pos_incd_only_df[key_features_fifteen]
df_fifteen_queries_data_noincident = no_incd_only_df[key_features_fifteen]
df_ten_queries_data_incident = pos_incd_only_df[key_features_ten]
df_ten_queries_data_noincident = no_incd_only_df[key_features_ten]
X = df_twenty_queries_data_incident
y = pos_incd_only_df['ESCAL']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20)
clf_train = LogisticRegression(random_state=0).fit(X_train,y_train)
y_pred = clf_train.predict(X_test)
y_pred_proba = clf_train.predict_proba(X_test)[::,1]
fpr, tpr, _ = metrics.roc_curve(y_test, y_pred_proba)
auc = metrics.roc_auc_score(y_test, y_pred_proba)
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot(fpr,tpr,label="data 1, auc="+str(auc))
plt.legend(loc=4)
score = clf_train.score(X_test, y_test)
print(score)
cfm = metrics.confusion_matrix(y_test, y_pred)
print(cfm)
print(clf_train.intercept_)
print(clf_train.coef_)
A = df_twenty_queries_data_incident
b = pos_incd_only_df['ESCAL']
A_challenge = df_twenty_queries_data_noincident
b_challenge = no_incd_only_df['ESCAL']
clf_train = LogisticRegression(random_state=0).fit(A,b)
b_pred = clf_train.predict(A_challenge)
score = clf_train.score(A_challenge, b_challenge)
print(score)
b_pred_proba = clf_train.predict_proba(A_challenge)[::,1]
fpr, tpr, _ = metrics.roc_curve(b_challenge, b_pred_proba)
auc = metrics.roc_auc_score(b_challenge, b_pred_proba)
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot(fpr,tpr,label="data 1, auc="+str(auc))
plt.legend(loc=4)
cfm = metrics.confusion_matrix(b_challenge, b_pred)
print(cfm)
print(clf_train.intercept_)
print(clf_train.coef_)
X = df_fifteen_queries_data_incident
y = pos_incd_only_df['ESCAL']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20)
clf_train = LogisticRegression(random_state=0).fit(X_train,y_train)
y_pred = clf_train.predict(X_test)
y_pred_proba = clf_train.predict_proba(X_test)[::,1]
fpr, tpr, _ = metrics.roc_curve(y_test, y_pred_proba)
auc = metrics.roc_auc_score(y_test, y_pred_proba)
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot(fpr,tpr,label="data 1, auc="+str(auc))
plt.legend(loc=4)
score = clf_train.score(X_test, y_test)
print(score)
cfm = metrics.confusion_matrix(y_test, y_pred)
cfm
X = df_ten_queries_data_incident
y = pos_incd_only_df['ESCAL']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20)
clf_train = LogisticRegression(random_state=0).fit(X_train,y_train)
y_pred = clf_train.predict(X_test)
y_pred_proba = clf_train.predict_proba(X_test)[::,1]
fpr, tpr, _ = metrics.roc_curve(y_test, y_pred_proba)
auc = metrics.roc_auc_score(y_test, y_pred_proba)
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot(fpr,tpr,label="data 1, auc="+str(auc))
plt.legend(loc=4)
score = clf_train.score(X_test, y_test)
print(score)
cfm = metrics.confusion_matrix(y_test, y_pred)
cfm
|
[
"58261016+baronessvonblixen@users.noreply.github.com"
] |
58261016+baronessvonblixen@users.noreply.github.com
|
ff7a8052353fba5d6f3f55a8adde037cec69a56c
|
96d6e8e34bc10c769a0407612deaeb6255dea449
|
/demos/time/time_service.py
|
efc9605cf52f1e0a480003dddc759c3d7e7933d2
|
[
"MIT"
] |
permissive
|
tomerfiliba-org/rpyc
|
d02115577b478ee49b1348f68f6e6905832847f3
|
9632c6db04b321b2fbcef3b99760436633874c29
|
refs/heads/master
| 2023-08-06T16:30:42.277071
| 2023-06-10T01:55:50
| 2023-06-10T01:55:50
| 145,733
| 524
| 62
|
NOASSERTION
| 2023-06-10T01:55:51
| 2009-03-08T11:23:29
|
Python
|
UTF-8
|
Python
| false
| false
| 186
|
py
|
import time
from rpyc import Service
class TimeService(Service):
def exposed_get_utc(self):
return time.time()
def exposed_get_time(self):
return time.ctime()
|
[
"tomerf@il.ibm.com"
] |
tomerf@il.ibm.com
|
819b17f1e0d7402b82de668bfef664f59a4fba1e
|
87aebf520931c1a94dc86c3af0806c7f439ccb65
|
/checkout/migrations/0005_auto_20210102_1730.py
|
8bf055961f2c008d388aa1e492c75f532d9af196
|
[] |
no_license
|
Code-Institute-Submissions/beauty4u
|
4220071f40ae28bd30f8656c77956392800600c9
|
63c3f4d1692fd3228d2acc69ab2b700f9591ad5d
|
refs/heads/master
| 2023-02-27T00:58:29.271850
| 2021-01-30T14:24:43
| 2021-01-30T14:24:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 429
|
py
|
# Generated by Django 3.0.7 on 2021-01-02 17:30
from django.db import migrations
import django_countries.fields
class Migration(migrations.Migration):
dependencies = [
('checkout', '0004_auto_20201223_2255'),
]
operations = [
migrations.AlterField(
model_name='order',
name='country',
field=django_countries.fields.CountryField(max_length=2),
),
]
|
[
"davidosongschool@gmail.com"
] |
davidosongschool@gmail.com
|
691d3c6b1bc19c12fae79418d90e9c0310fb8606
|
66e44eae3739e63cc9665d532ac1c394afdeabf1
|
/trxFit/trx/pagina/migrations/0013_auto_20171118_2153.py
|
8c266c605092b9c11af62f0cf5ab040073065cef
|
[] |
no_license
|
juandiemore/trxFit
|
6928fda3a87e6e927b942c612cb9d56af40ec1ce
|
f8f5d7cad3556f76fefff58a0aa1c425a3af6d57
|
refs/heads/master
| 2021-08-17T01:57:24.356073
| 2017-11-20T17:07:02
| 2017-11-20T17:07:02
| 111,346,655
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 433
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-11-19 02:53
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pagina', '0012_auto_20171118_2148'),
]
operations = [
migrations.AlterField(
model_name='clase',
name='fecha',
field=models.DateField(),
),
]
|
[
"33384270+juandiemore@users.noreply.github.com"
] |
33384270+juandiemore@users.noreply.github.com
|
e138a5128d3e3e438bd4707a8f2d9b4478c022c6
|
74008bd3612b2bb8bc780d7b86dccaeba29f21a2
|
/Python/SoundingKeyboardMouse.py
|
7643ded824af208635403fed7caf15f9590f6b5d
|
[] |
no_license
|
ATAccessGroup/Scripting-Recipes-for-AT
|
94c120c60add7a0746574d391f46a4681ae1ccfc
|
2becedf9c5aa5c9572fe5dfa302859bd74e9dfe3
|
refs/heads/master
| 2020-04-05T23:41:04.145809
| 2017-06-05T12:08:49
| 2017-06-05T12:08:49
| 4,162,779
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,255
|
py
|
class _Getch:
"""Gets a single character from standard input. Does not echo to the
screen."""
def __init__(self):
try:
self.impl = _GetchWindows()
except ImportError:
try:
self.impl = _GetchMacCarbon()
except AttributeError:
self.impl = _GetchUnix()
def __call__(self): return self.impl()
class _GetchUnix:
def __init__(self):
import tty, sys, termios # import termios now or else you'll get the Unix version on the Mac
def __call__(self):
import sys, tty, termios
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
class _GetchWindows:
def __init__(self):
import msvcrt
def __call__(self):
import msvcrt
return msvcrt.getch()
class _GetchMacCarbon:
"""
A function which returns the current ASCII key that is down;
if no ASCII key is down, the null string is returned. The
page http://www.mactech.com/macintosh-c/chap02-1.html was
very helpful in figuring out how to do this.
"""
def __init__(self):
import Carbon
Carbon.Evt #see if it has this (in Unix, it doesn't)
def __call__(self):
import Carbon
if Carbon.Evt.EventAvail(0x0008)[0]==0: # 0x0008 is the keyDownMask
return ''
else:
#
# The event contains the following info:
# (what,msg,when,where,mod)=Carbon.Evt.GetNextEvent(0x0008)[1]
#
# The message (msg) contains the ASCII char which is
# extracted with the 0x000000FF charCodeMask; this
# number is converted to an ASCII character with chr() and
# returned
#
(what,msg,when,where,mod)=Carbon.Evt.GetNextEvent(0x0008)[1]
return chr(msg & 0x000000FF)
if __name__ == '__main__': # a little test
print 'Press a key'
inkey = _Getch()
import sys
for i in xrange(sys.maxint):
k=inkey()
if k<>'':break
print 'you pressed ',k
###
|
[
"willwade@gmail.com"
] |
willwade@gmail.com
|
42e5956217bb73d7bf84ce47a3cd84c808b6c11f
|
2130aa6efd199c612b03b0cd949375dd828dd218
|
/acoustid/data/submission.py
|
b3897ac10f2f83bd8c45d4bea70e680730d28066
|
[
"MIT"
] |
permissive
|
bazo/acoustid-server
|
4774965b8a16555100c972c09582bb09ea10df3f
|
56b11f1bbd093e23970d9baae2a2655ecea34aee
|
refs/heads/master
| 2020-05-27T21:08:29.078822
| 2017-01-02T20:19:42
| 2017-01-02T20:19:42
| 83,599,159
| 1
| 0
| null | 2017-03-01T20:36:20
| 2017-03-01T20:36:20
| null |
UTF-8
|
Python
| false
| false
| 6,283
|
py
|
# Copyright (C) 2011 Lukas Lalinsky
# Distributed under the MIT license, see the LICENSE file for details.
import logging
from sqlalchemy import sql
from acoustid import tables as schema, const
from acoustid.data.fingerprint import lookup_fingerprint, insert_fingerprint, inc_fingerprint_submission_count, FingerprintSearcher
from acoustid.data.musicbrainz import resolve_mbid_redirect
from acoustid.data.track import insert_track, insert_mbid, insert_puid, merge_tracks, insert_track_meta, can_add_fp_to_track, can_merge_tracks, insert_track_foreignid
logger = logging.getLogger(__name__)
def insert_submission(conn, data):
"""
Insert a new submission into the database
"""
with conn.begin():
insert_stmt = schema.submission.insert().values({
'fingerprint': data['fingerprint'],
'length': data['length'],
'bitrate': data.get('bitrate'),
'mbid': data.get('mbid'),
'puid': data.get('puid'),
'source_id': data.get('source_id'),
'format_id': data.get('format_id'),
'meta_id': data.get('meta_id'),
'foreignid_id': data.get('foreignid_id'),
})
id = conn.execute(insert_stmt).inserted_primary_key[0]
logger.debug("Inserted submission %r with data %r", id, data)
return id
def import_submission(conn, submission, index=None):
"""
Import the given submission into the main fingerprint database
"""
with conn.begin():
update_stmt = schema.submission.update().where(
schema.submission.c.id == submission['id'])
conn.execute(update_stmt.values(handled=True))
mbids = []
if submission['mbid']:
mbids.append(resolve_mbid_redirect(conn, submission['mbid']))
logger.info("Importing submission %d with MBIDs %s",
submission['id'], ', '.join(mbids))
num_unique_items = len(set(submission['fingerprint']))
if num_unique_items < const.FINGERPRINT_MIN_UNIQUE_ITEMS:
logger.info("Skipping, has only %d unique items", num_unique_items)
return
num_query_items = conn.execute("SELECT icount(acoustid_extract_query(%(fp)s))", dict(fp=submission['fingerprint']))
if not num_query_items:
logger.info("Skipping, no data to index")
return
searcher = FingerprintSearcher(conn, index, fast=False)
searcher.min_score = const.TRACK_MERGE_THRESHOLD
matches = searcher.search(submission['fingerprint'], submission['length'])
fingerprint = {
'id': None,
'track_id': None,
'fingerprint': submission['fingerprint'],
'length': submission['length'],
'bitrate': submission['bitrate'],
'format_id': submission['format_id'],
}
if matches:
match = matches[0]
all_track_ids = set()
possible_track_ids = set()
for m in matches:
if m['track_id'] in all_track_ids:
continue
all_track_ids.add(m['track_id'])
logger.debug("Fingerprint %d with track %d is %d%% similar", m['id'], m['track_id'], m['score'] * 100)
if can_add_fp_to_track(conn, m['track_id'], submission['fingerprint'], submission['length']):
possible_track_ids.add(m['track_id'])
if not fingerprint['track_id']:
fingerprint['track_id'] = m['track_id']
if m['score'] > const.FINGERPRINT_MERGE_THRESHOLD:
fingerprint['id'] = m['id']
if len(possible_track_ids) > 1:
for group in can_merge_tracks(conn, possible_track_ids):
if fingerprint['track_id'] in group and len(group) > 1:
fingerprint['track_id'] = min(group)
group.remove(fingerprint['track_id'])
merge_tracks(conn, fingerprint['track_id'], list(group))
break
if not fingerprint['track_id']:
fingerprint['track_id'] = insert_track(conn)
if not fingerprint['id']:
fingerprint['id'] = insert_fingerprint(conn, fingerprint, submission['id'], submission['source_id'])
else:
inc_fingerprint_submission_count(conn, fingerprint['id'], submission['id'], submission['source_id'])
for mbid in mbids:
insert_mbid(conn, fingerprint['track_id'], mbid, submission['id'], submission['source_id'])
if submission['puid'] and submission['puid'] != '00000000-0000-0000-0000-000000000000':
insert_puid(conn, fingerprint['track_id'], submission['puid'], submission['id'], submission['source_id'])
if submission['meta_id']:
insert_track_meta(conn, fingerprint['track_id'], submission['meta_id'], submission['id'], submission['source_id'])
if submission['foreignid_id']:
insert_track_foreignid(conn, fingerprint['track_id'], submission['foreignid_id'], submission['id'], submission['source_id'])
return fingerprint
def import_queued_submissions(conn, index=None, limit=100, ids=None):
"""
Import the given submission into the main fingerprint database
"""
query = schema.submission.select(schema.submission.c.handled == False).\
order_by(schema.submission.c.mbid.nullslast(), schema.submission.c.id.desc())
if ids is not None:
query = query.where(schema.submission.c.id.in_(ids))
if limit is not None:
query = query.limit(limit)
count = 0
for submission in conn.execute(query):
import_submission(conn, submission, index=index)
count += 1
logger.debug("Imported %d submissions", count)
return count
def lookup_submission_status(db, ids):
if not ids:
return {}
source = schema.fingerprint_source.\
join(schema.fingerprint).\
join(schema.track)
query = sql.select([schema.fingerprint_source.c.submission_id, schema.track.c.gid], from_obj=source).\
where(schema.fingerprint_source.c.submission_id.in_(ids))
results = {}
for id, track_gid in db.execute(query):
results[id] = track_gid
return results
|
[
"lalinsky@gmail.com"
] |
lalinsky@gmail.com
|
ba257c7a32b2ec4aa2b22fc7c7b92e305f9f957d
|
5b3caf64b77161748d0929d244798a8fb914d9c5
|
/Python Excel Examples/GeneralApiDemo/convertInRequest.py
|
b196e1d1ec4e23d1a9d95f987f3a2b8969ea75af
|
[] |
no_license
|
EiceblueCloud/Spire.Cloud.Excel
|
0d56864991eaf8d44c38f21af70db614b1d804b7
|
d9845d5cefd15a3ab408b2c9f80828a4767e2b82
|
refs/heads/master
| 2021-07-20T23:44:39.068568
| 2021-07-15T03:04:49
| 2021-07-15T03:04:49
| 230,225,396
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 556
|
py
|
import spirecloudexcel
from spirecloudexcel.configuration import Configuration as ExcelConfiguration
from spirecloudexcel.api.general_api import GeneralApi
appId = "your id"
appKey = "your key"
baseUrl = "https://api.e-iceblue.cn"
configuration = ExcelConfiguration(appId, appKey,baseUrl)
api = spirecloudexcel.api.general_api.GeneralApi(configuration)
format = "Pdf" #Supported formats: Xlsx/Xls/Xlsb/Ods/Pdf/Xps/Ps/Pcl
file = "D:/inputFile/charts.xlsx"
password = ""
result = api.convert_in_request(format,file=file, password=password)
|
[
"noreply@github.com"
] |
noreply@github.com
|
1d478e60d70d7e060c9eea09d83b00c7e8bb0fee
|
fbe504abceda961d0ce168e4d54c9e51f6ba213f
|
/utils/model.py
|
af2bf3b0ebe2b6371522f1c4e2454021095ce1e7
|
[
"MIT"
] |
permissive
|
SourabhSomdeve/ANN_implementation
|
0ad50be5637a508b9f1a134b27a34ebf5dc5d8f0
|
077b26fa447ceec6c586c2dde86d18ce3dca4b15
|
refs/heads/main
| 2023-08-18T00:19:50.657431
| 2021-10-01T15:09:46
| 2021-10-01T15:09:46
| 412,318,083
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,848
|
py
|
from contextlib import nullcontext
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import tensorflow as tf
import logging
logger = logging.getLogger(__name__)
class ANN_model():
def __init__(self,epochs):
self.epochs = epochs
self.model_clf = None
def fit(self,X_train,y_train,X_valid,y_valid):
LAYERS = [
tf.keras.layers.Flatten(input_shape=[28,28], name="inputLayer"),
tf.keras.layers.Dense(300, activation="relu", name="hiddenLayer1"),
tf.keras.layers.Dense(100, activation="relu", name="hiddenLayer2"),
tf.keras.layers.Dense(10, activation="softmax", name="outputLayer")
]
self.model_clf = tf.keras.models.Sequential(LAYERS)
LOSS_FUNCTION = "sparse_categorical_crossentropy"
OPTIMIZER = "SGD"
METRICS = ["accuracy"]
self.model_clf.compile(loss=LOSS_FUNCTION, optimizer=OPTIMIZER, metrics=METRICS)
VALIDATION = (X_valid, y_valid)
logger.info("----Training started------")
history = self.model_clf.fit(X_train, y_train, epochs=self.epochs, validation_data=VALIDATION)
def predict(self,X_test,y_test):
logger.info("--Evaluating on the Test data--")
self.model_clf.evaluate(X_test, y_test)
logger.info("Showing the result of first 3 data points")
X_new = X_test[:3]
y_prob = self.model_clf.predict(X_new)
Y_pred= np.argmax(y_prob, axis=-1)
for img_array, pred, actual in zip(X_new, Y_pred, y_test[:3]):
plt.imshow(img_array, cmap="binary")
plt.title(f"predicted: {pred}, Actual: {actual}")
plt.axis("off")
plt.show()
print("---"*20)
return self.model_clf
|
[
"sourabhsomdev@winjit.com"
] |
sourabhsomdev@winjit.com
|
4ad97214cab242cab7be5cd4232d8eca3d8ff676
|
89d920e8de469466f45172948082284b24ee8ca6
|
/sdt/bin/sdpostpipelineutils.py
|
4ea778b4bbc53e16782ee12c4bdf0fc87ea83537
|
[] |
no_license
|
cedadev/synda
|
fb22cce909e8b4fb8e51e7ab506c337d6ec5d9d2
|
9b9fa5b9b13719e1307f093d208256e359e501af
|
refs/heads/master
| 2021-09-24T03:56:21.545769
| 2020-09-16T10:34:44
| 2020-09-16T10:34:44
| 187,797,897
| 1
| 0
| null | 2020-01-28T12:56:15
| 2019-05-21T08:45:47
|
Python
|
UTF-8
|
Python
| false
| false
| 1,918
|
py
|
#!/usr/bin/env python
# -*- coding: ISO-8859-1 -*-
##################################
# @program synda
# @description climate models data transfer program
# @copyright Copyright “(c)2009 Centre National de la Recherche Scientifique CNRS.
# All Rights Reserved”
# @license CeCILL (https://raw.githubusercontent.com/Prodiguer/synda/master/sdt/doc/LICENSE)
##################################
"""This module contains post pipeline generic functions. """
import sdapp
import sdconst
from sdexception import SDException
def exists_attached_parameter(file_,name):
if 'attached_parameters' in file_:
if name in file_['attached_parameters']:
return True
else:
return False
else:
return False
def get_attached_parameter(file_,name,default=None):
if 'attached_parameters' in file_:
return file_['attached_parameters'].get(name,default)
else:
return default
def get_attached_parameter__global(files,name):
"""This function assumes all files have the same value for the <name> attribute."""
if len(files)>0:
file_=files[0] # retrieve first file's (arbitrary)
return get_attached_parameter(file_,name)
else:
return None
# the two methods below is to have some abstration over file type
def get_functional_identifier_value(f):
name=get_functional_identifier_name(f)
if name in f:
functional_id=f[name]
else:
raise SDException('SYDUTILS-020','Incorrect identifier (%s)'%name)
return functional_id
def get_functional_identifier_name(f):
    """Return the identifier key name matching f's type (file vs dataset).

    Raises SDException for any other type value.
    """
    type_=f["type"]
    if type_==sdconst.SA_TYPE_FILE:
        return 'file_functional_id'
    if type_==sdconst.SA_TYPE_DATASET:
        return 'dataset_functional_id'
    raise SDException('SYDUTILS-028','Incorrect type (%s)'%f["type"])
|
[
"jerome@TOSH001.home"
] |
jerome@TOSH001.home
|
c2a00911624181706a89e7875616e367f73ced08
|
ae8531f97a4a457534fb5d77051c3eb8dcd96ae0
|
/chapter3/23extract_sections.py
|
d934392ef196460f9745ea6ef3f09dd51baf2ab0
|
[] |
no_license
|
Taurin190/NLP100
|
a273bb3976d34d55eb7e75404d4e71a684e8ab5a
|
2d2de879aa289ff3c08ef8bfb1234b6e87030bdc
|
refs/heads/master
| 2021-06-25T09:49:26.303027
| 2020-11-02T23:29:28
| 2020-11-02T23:29:28
| 152,951,733
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 712
|
py
|
# coding: utf-8
# NLP100 ex23: print every MediaWiki section heading with its level
# (number of '=' characters minus one).
import json
import re

# Compile the patterns once (they run on every line of every article) and use
# raw strings so the regex escapes are not touched by Python string parsing.
# A lookbehind of N '=' characters marks a heading of that depth; deeper
# headings are tested first so '====Title' is not reported as a shallower one.
_PATTERNS = [
    (re.compile(r"(?<=====)\w+"), 3),
    (re.compile(r"(?<====)\w+"), 2),
    (re.compile(r"(?<===)\w+"), 1),
]

# 'with' guarantees the file handle is closed (the original never closed it).
with open("jawiki-country.json", "r") as wiki_json:
    for wiki_line in wiki_json:
        wiki_info = json.loads(wiki_line)
        for wiki_text in wiki_info['text'].split("\n"):
            for pattern, level in _PATTERNS:
                match = pattern.search(wiki_text)
                if match is not None:
                    # Same output format as before: "<name> <level>".
                    print(match.group(0) + " " + str(level))
                    break
|
[
"taura.koichi@gmail.com"
] |
taura.koichi@gmail.com
|
ec3da0a8ee514d27186134388ab0aeaa6ef2bb7b
|
16132279509374c6dd94693cfc637476449ee1d6
|
/leetcode/207. Course Schedule.py
|
e0147d3bc00e7096b833055b0925a7dad9b0cac9
|
[
"MIT"
] |
permissive
|
isaiahnields/algorithms
|
89f1ee343e057b159a227f852b7591c408dd503a
|
065b95a007dab3f5e375e73a681e7a6e344b41f1
|
refs/heads/master
| 2020-03-28T20:08:50.662795
| 2019-02-25T20:32:59
| 2019-02-25T20:32:59
| 149,044,140
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 931
|
py
|
class Solution:
    def canFinish(self, numCourses, prerequisites):
        """
        Decide whether all courses can be finished, i.e. whether the
        prerequisite graph is acyclic.

        :type numCourses: int
        :type prerequisites: List[List[int]]
        :rtype: bool
        """
        # Adjacency map: course -> list of its prerequisites.
        d = dict()
        for p in prerequisites:
            d.setdefault(p[0], []).append(p[1])
        if len(d) == 0: return True
        # DFS from an arbitrary remaining node until the map is drained;
        # helper() deletes every node it fully explores.
        while len(d) != 0:
            if not self.helper(d, list(d.keys())[0], set()):
                return False
        return True
    def helper(self, d, k, visited):
        """Depth-first cycle check. 'visited' holds the nodes on the CURRENT
        DFS path only; meeting one again while still on the path is a cycle."""
        if k in visited:
            return False
        if k not in d:
            # Either no outgoing edges or already fully processed earlier.
            return True
        visited.add(k)
        no_cycle = True
        v = d[k]
        del d[k]
        for i in v:
            no_cycle &= self.helper(d, i, visited)
        # BUG FIX: backtrack — take k off the path set once its subtree is
        # fully explored. Without this, a node reachable through two disjoint
        # paths (a diamond, e.g. 4->2->1, 4->3->1) was falsely reported as a
        # cycle because 1 stayed in 'visited' after the first visit.
        if no_cycle:
            visited.discard(k)
        return no_cycle
|
[
"noreply@github.com"
] |
noreply@github.com
|
65ee59c7c08b8852c696c082da9dae5e5c712f37
|
c1a6e2b0b4ba380e6d7b8c5484309fbd1ffd6e43
|
/mp2/uttt.py
|
f7c77c37d8b12e71261306f2bc84c6f663ddedc5
|
[] |
no_license
|
jasonwhwang/cs440
|
af001285e7aeef24902a02d757f316b5c04cc5dc
|
317469c417f965a684e50bf478a616819c6946e3
|
refs/heads/master
| 2020-04-19T01:15:07.489709
| 2019-05-02T23:13:21
| 2019-05-02T23:13:21
| 167,867,314
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 18,182
|
py
|
from time import sleep
from math import inf
from random import randint
class ultimateTicTacToe:
    """Ultimate (9x9) tic-tac-toe played by search agents.

    The grid is nine local 3x3 boards stored in one 9x9 list of lists
    (self.board). self.globalIdx maps a local-board index (0-8, row-major)
    to the (row, col) of that board's top-left cell.
    """
    def __init__(self):
        """
        Initialization of the game.
        """
        self.board=[['_','_','_','_','_','_','_','_','_'],
                    ['_','_','_','_','_','_','_','_','_'],
                    ['_','_','_','_','_','_','_','_','_'],
                    ['_','_','_','_','_','_','_','_','_'],
                    ['_','_','_','_','_','_','_','_','_'],
                    ['_','_','_','_','_','_','_','_','_'],
                    ['_','_','_','_','_','_','_','_','_'],
                    ['_','_','_','_','_','_','_','_','_'],
                    ['_','_','_','_','_','_','_','_','_']]
        self.maxPlayer='X'
        self.minPlayer='O'
        self.maxDepth=3
        #The start indexes of each local board
        self.globalIdx=[(0,0),(0,3),(0,6),(3,0),(3,3),(3,6),(6,0),(6,3),(6,6)]
        #Start local board index for reflex agent playing
        self.startBoardIdx=8
        # self.startBoardIdx=randint(0,8)
        #utility value for reflex offensive and reflex defensive agents
        self.winnerMaxUtility=10000
        self.twoInARowMaxUtility=500
        self.preventThreeInARowMaxUtility=100
        self.cornerMaxUtility=30
        self.winnerMinUtility=-10000
        self.twoInARowMinUtility=-100
        self.preventThreeInARowMinUtility=-500
        self.cornerMinUtility=-30
    def printGameBoard(self):
        """
        This function prints the current game board.
        """
        print('\n'.join([' '.join([str(cell) for cell in row]) for row in self.board[:3]])+'\n')
        print('\n'.join([' '.join([str(cell) for cell in row]) for row in self.board[3:6]])+'\n')
        print('\n'.join([' '.join([str(cell) for cell in row]) for row in self.board[6:9]])+'\n')
    # ----------------------------------------------------------------
    # Helper Functions
    # ----------------------------------------------------------------
    def drawToBoard(self, coor, isMax):
        """Place the moving player's symbol at global (row, col) *coor*."""
        if isMax:
            self.board[coor[0]][coor[1]] = self.maxPlayer
        else:
            self.board[coor[0]][coor[1]] = self.minPlayer
    def removeFromBoard(self, coor):
        """Undo a move at *coor* (used to backtrack during search)."""
        self.board[coor[0]][coor[1]] = '_'
    def getBoardIdx(self, coor):
        """Map a global (row, col) to the local-board index (0-8) that the
        NEXT move must be played in: a cell's position within its own local
        board selects the destination board."""
        modY = coor[0] % 3
        modX = coor[1] % 3
        if modY == 0 and modX == 0:
            return 0
        if modY == 0 and modX == 1:
            return 1
        if modY == 0 and modX == 2:
            return 2
        if modY == 1 and modX == 0:
            return 3
        if modY == 1 and modX == 1:
            return 4
        if modY == 1 and modX == 2:
            return 5
        if modY == 2 and modX == 0:
            return 6
        if modY == 2 and modX == 1:
            return 7
        if modY == 2 and modX == 2:
            return 8
    def getLocalMoves(self, currBoardIdx):
        """Return the empty (row, col) cells of local board *currBoardIdx*."""
        localMovesList = []
        localIdx = self.globalIdx[currBoardIdx]
        for row in range(localIdx[0], localIdx[0]+3):
            for col in range(localIdx[1], localIdx[1]+3):
                if self.board[row][col] == '_':
                    localMovesList.append((row,col))
        return localMovesList
    def getAllMoves(self):
        """Return every empty (row, col) cell on the whole 9x9 board."""
        localMovesList = []
        for row in range(0, 9):
            for col in range(0, 9):
                if self.board[row][col] == '_':
                    localMovesList.append((row,col))
        return localMovesList
    def horizontalCount(self, y, x, isMax):
        """Count, over the 3 rows of the local board whose top-left is (y, x):
        tiar    - rows with two offense marks and one empty cell;
        prevent - rows where one offense mark blocks two defense marks.
        NOTE(review): offense is always self.maxPlayer regardless of isMax;
        this appears to rely on playGamePredifinedAgent swapping the
        maxPlayer/minPlayer symbols every turn -- confirm."""
        offense = self.maxPlayer
        defense = self.minPlayer
        tiar = 0
        prevent = 0
        for row in range(y, y+3):
            oCount = 0
            dCount = 0
            nCount = 0
            for col in range(x, x+3):
                if self.board[row][col] == offense:
                    oCount += 1
                elif self.board[row][col] == defense:
                    dCount += 1
                else:
                    nCount += 1
            if oCount == 2 and nCount == 1:
                tiar += 1
            if oCount == 1 and dCount == 2:
                prevent += 1
        return tiar, prevent
    def verticalCount(self, y, x, isMax):
        """Column version of horizontalCount (same NOTE about isMax applies)."""
        offense = self.maxPlayer
        defense = self.minPlayer
        tiar = 0
        prevent = 0
        for col in range(x, x+3):
            oCount = 0
            dCount = 0
            nCount = 0
            for row in range(y, y+3):
                if self.board[row][col] == offense:
                    oCount += 1
                elif self.board[row][col] == defense:
                    dCount += 1
                else:
                    nCount += 1
            if oCount == 2 and nCount == 1:
                tiar += 1
            if oCount == 1 and dCount == 2:
                prevent += 1
        return tiar, prevent
    def diagonalCount(self, y, x, isMax):
        """Diagonal version of horizontalCount: scans the main diagonal and
        then the anti-diagonal of the local board at (y, x)."""
        offense = self.maxPlayer
        defense = self.minPlayer
        tiar = 0
        prevent = 0
        # Main diagonal (top-left to bottom-right).
        oCount = 0
        dCount = 0
        nCount = 0
        for row in range(0, 3):
            if self.board[y+row][x+row] == offense:
                oCount += 1
            elif self.board[y+row][x+row] == defense:
                dCount += 1
            else:
                nCount += 1
        if oCount == 2 and nCount == 1:
            tiar += 1
        if oCount == 1 and dCount == 2:
            prevent += 1
        # Anti-diagonal (bottom-left to top-right).
        oCount = 0
        dCount = 0
        nCount = 0
        for row in range(0, 3):
            if self.board[y+2-row][x+row] == offense:
                oCount += 1
            elif self.board[y+2-row][x+row] == defense:
                dCount += 1
            else:
                nCount += 1
        if oCount == 2 and nCount == 1:
            tiar += 1
        if oCount == 1 and dCount == 2:
            prevent += 1
        return tiar, prevent
    def checkLocalWinner(self, currBoardIdx):
        """Return 1/-1/0 if maxPlayer/minPlayer/nobody has three in a row on
        local board *currBoardIdx*."""
        localIdx = self.globalIdx[currBoardIdx]
        y = localIdx[0]
        x = localIdx[1]
        # Check Horizontals
        for row in range(y, y+3):
            if self.board[row][x] == self.maxPlayer and self.board[row][x+1] == self.maxPlayer and self.board[row][x+2] == self.maxPlayer:
                return 1
            if self.board[row][x] == self.minPlayer and self.board[row][x+1] == self.minPlayer and self.board[row][x+2] == self.minPlayer:
                return -1
        # Check Verticals
        for col in range(x, x+3):
            if self.board[y][col] == self.maxPlayer and self.board[y+1][col] == self.maxPlayer and self.board[y+2][col] == self.maxPlayer:
                return 1
            if self.board[y][col] == self.minPlayer and self.board[y+1][col] == self.minPlayer and self.board[y+2][col] == self.minPlayer:
                return -1
        # Check Diagonals
        if self.board[y][x] == self.maxPlayer and self.board[y+1][x+1] == self.maxPlayer and self.board[y+2][x+2] == self.maxPlayer:
            return 1
        if self.board[y][x] == self.minPlayer and self.board[y+1][x+1] == self.minPlayer and self.board[y+2][x+2] == self.minPlayer:
            return -1
        if self.board[y+2][x] == self.maxPlayer and self.board[y+1][x+1] == self.maxPlayer and self.board[y][x+2] == self.maxPlayer:
            return 1
        if self.board[y+2][x] == self.minPlayer and self.board[y+1][x+1] == self.minPlayer and self.board[y][x+2] == self.minPlayer:
            return -1
        return 0
    def countLocalTwoInARow(self, currBoardIdx, isMax):
        """Sum the row/column/diagonal (two-in-a-row, prevent) counts for one
        local board."""
        tiar = 0
        prevent = 0
        localIdx = self.globalIdx[currBoardIdx]
        y = localIdx[0]
        x = localIdx[1]
        t, p = self.horizontalCount(y,x,isMax)
        tiar += t
        prevent += p
        t, p = self.verticalCount(y,x,isMax)
        tiar += t
        prevent += p
        t, p = self.diagonalCount(y,x,isMax)
        tiar += t
        prevent += p
        return tiar, prevent
    def countCorners(self, currBoardIdx, isMax):
        """Count corners of local board *currBoardIdx* held by maxPlayer.
        NOTE(review): like the count helpers above, isMax is not used --
        symbol is always self.maxPlayer; confirm against the player-swap in
        playGamePredifinedAgent."""
        symbol = self.maxPlayer
        coor = self.globalIdx[currBoardIdx]
        count = 0
        if self.board[coor[0]][coor[1]] == symbol:
            count += 1
        if self.board[coor[0]][coor[1]+2] == symbol:
            count += 1
        if self.board[coor[0]+2][coor[1]] == symbol:
            count += 1
        if self.board[coor[0]+2][coor[1]+2] == symbol:
            count += 1
        return count
    # _________________________________________________________________
    def evaluatePredifined(self, isMax):
        """
        This function implements the evaluation function for ultimate tic tac toe for predifined agent.
        input args:
        isMax(bool): boolean variable indicates whether it's maxPlayer or minPlayer.
                     True for maxPlayer, False for minPlayer
        output:
        score(float): estimated utility score for maxPlayer or minPlayer
        """
        #YOUR CODE HERE
        # Second rule (500/100 weights) over all nine local boards; the
        # corner rule (weight 30) only applies when the first pass scored 0.
        score = 0
        tiar = 0
        prevent = 0
        corners = 0
        for count in range(0,9):
            tiar, prevent = self.countLocalTwoInARow(count, isMax)
            if isMax:
                score += tiar*500
                score += prevent*100
            else:
                score -= tiar*100
                score -= prevent*500
        if score == 0:
            for count in range(0,9):
                corners = self.countCorners(count, isMax)
                if isMax:
                    score += corners*30
                else:
                    score -= corners*30
        # self.printGameBoard()
        # input(str(score) + "->")
        return score
    def evaluateDesigned(self, isMax):
        """
        This function implements the evaluation function for ultimate tic tac toe for your own agent.
        input args:
        isMax(bool): boolean variable indicates whether it's maxPlayer or minPlayer.
                     True for maxPlayer, False for minPlayer
        output:
        score(float): estimated utility score for maxPlayer or minPlayer
        """
        #YOUR CODE HERE
        # Unimplemented stub: always returns 0.
        score=0
        return score
    def checkMovesLeft(self):
        """
        This function checks whether any legal move remains on the board.
        output:
        movesLeft(bool): boolean variable indicates whether any legal move remains
                        on the board.
        """
        #YOUR CODE HERE
        # movesLeft=True
        for row in range(0, 9):
            for col in range(0, 9):
                if self.board[row][col] == '_':
                    return True
        return False
    def checkWinner(self):
        #Return termimnal node status for maximizer player 1-win,0-tie,-1-lose
        """
        This function checks whether there is a winner on the board.
        output:
        winner(int): Return 0 if there is no winner.
                     Return 1 if maxPlayer is the winner.
                     Return -1 if miniPlayer is the winner.
        """
        #YOUR CODE HERE
        # First local board with a three-in-a-row decides the game.
        winner=0
        for count in range(0,9):
            winner = self.checkLocalWinner(count)
            if winner != 0:
                return winner
        return winner
    def alphabeta(self,depth,currBoardIdx,alpha,beta,isMax):
        """
        This function implements alpha-beta algorithm for ultimate tic-tac-toe game.
        input args:
        depth(int): current depth level
        currBoardIdx(int): current local board index
        alpha(float): alpha value
        beta(float): beta value
        isMax(bool):boolean variable indicates whether it's maxPlayer or minPlayer.
                     True for maxPlayer, False for minPlayer
        output:
        bestValue(float):the bestValue that current player may have
        """
        #YOUR CODE HERE
        # Unimplemented stub: always returns 0.0.
        bestValue=0.0
        return bestValue
    def minimax(self, depth, currBoardIdx, isMax):
        """
        This function implements minimax algorithm for ultimate tic-tac-toe game.
        input args:
        depth(int): current depth level
        currBoardIdx(int): current local board index
        alpha(float): alpha value
        beta(float): beta value
        isMax(bool):boolean variable indicates whether it's maxPlayer or minPlayer.
                     True for maxPlayer, False for minPlayer
        output:
        bestValue(float):the bestValue that current player may have
        """
        #YOUR CODE HERE
        allValues = []
        # Depth cutoff: score only the current local board -- a local win is
        # treated as +/-10000 depending on whose turn it is.
        if depth >= self.maxDepth:
            winner = self.checkLocalWinner(currBoardIdx)
            if isMax:
                if winner == 1:
                    return 10000
                if winner == -1:
                    return -10000
            else:
                if winner == 1:
                    return -10000
                if winner == -1:
                    return 10000
            return self.evaluatePredifined(isMax)
        currValidMoves = self.getLocalMoves(currBoardIdx)
        # No legal move in the target local board: evaluate it as terminal.
        if not currValidMoves:
            winner = self.checkLocalWinner(currBoardIdx)
            if isMax:
                if winner == 1:
                    return 10000
                if winner == -1:
                    return -10000
            else:
                if winner == 1:
                    return -10000
                if winner == -1:
                    return 10000
            return self.evaluatePredifined(isMax)
        # Toggle to the child player's perspective for move generation, then
        # toggle back before choosing max vs min at this node.
        isMax = not isMax
        for validMove in currValidMoves:
            self.drawToBoard(validMove, isMax)
            currValue = self.minimax(depth+1, self.getBoardIdx(validMove), isMax)
            self.removeFromBoard(validMove)
            allValues.append(currValue)
        isMax = not isMax
        if isMax:
            return max(allValues)
        else:
            return min(allValues)
    def playGamePredifinedAgent(self,maxFirst,isMinimax):
        """
        This function implements the processes of the game of predifined offensive agent vs defensive agent.
        input args:
        maxFirst(bool): boolean variable indicates whether maxPlayer or minPlayer plays first.
                        True for maxPlayer plays first, and False for minPlayer plays first.
        isMinimax(bool):boolean variable indicates whether it's using minimax or alpha-beta pruning algorithm.
                        True is minimax and False is alpha-beta.
        output:
        bestMove(list of tuple): list of bestMove coordinates at each step
        bestValue(list of float): list of bestValue at each move
        expandedNodes(list of int): list of expanded nodes at each move
        gameBoards(list of 2d lists): list of game board positions at each move
        winner(int): 1 for maxPlayer is the winner, -1 for minPlayer is the winner, and 0 for tie.
        """
        bestMove=[]
        bestValue=[]
        gameBoards=[]
        winner=0
        # NOTE(review): expandedNodes is returned but never populated.
        expandedNodes = []
        currBoardIdx = self.startBoardIdx
        isMax = maxFirst
        # Instead of toggling isMax each turn, the X/O symbols themselves are
        # swapped below; remember the originals so they can be restored.
        originalMax = self.maxPlayer
        originalMin = self.minPlayer
        currBestMove = None
        for count in range(0,81):
            currValidMoves = self.getLocalMoves(currBoardIdx)
            if not currValidMoves:
                # Forced board is full: fall back to any open cell.
                currValidMoves = self.getAllMoves()
            if not currValidMoves:
                break
            currBestMove = currValidMoves[0]
            currBestValue = 0.0
            if isMinimax:
                for validMove in currValidMoves:
                    self.drawToBoard(validMove,isMax)
                    tryValue = self.minimax(1, self.getBoardIdx(validMove), isMax)
                    self.removeFromBoard(validMove)
                    # NOTE(review): strict '>' against an initial 0.0 keeps
                    # the first move whenever every value is <= 0 -- confirm
                    # this is intended.
                    if tryValue > currBestValue:
                        currBestMove = validMove
                        currBestValue = tryValue
            self.drawToBoard(currBestMove, isMax)
            bestMove.append(currBestMove)
            bestValue.append(currBestValue)
            # NOTE(review): appends a reference to the live board, not a
            # copy, so all entries alias the final position.
            gameBoards.append(self.board)
            if self.checkLocalWinner(currBoardIdx) != 0:
                self.maxPlayer = originalMax
                self.minPlayer = originalMin
                winner = self.checkLocalWinner(currBoardIdx)
                break
            currBoardIdx = self.getBoardIdx(currBestMove)
            # Hand the 'offense' role to the other side by swapping symbols.
            temp = self.maxPlayer
            self.maxPlayer = self.minPlayer
            self.minPlayer = temp
            # self.printGameBoard()
            # input(str(count) + "----->")
        self.printGameBoard()
        return gameBoards, bestMove, expandedNodes, bestValue, winner
    def playGameYourAgent(self):
        """
        This function implements the processes of the game of your own agent vs predifined offensive agent.
        input args:
        output:
        bestMove(list of tuple): list of bestMove coordinates at each step
        gameBoards(list of 2d lists): list of game board positions at each move
        winner(int): 1 for maxPlayer is the winner, -1 for minPlayer is the winner, and 0 for tie.
        """
        #YOUR CODE HERE
        # Unimplemented stub.
        bestMove=[]
        gameBoards=[]
        winner=0
        return gameBoards, bestMove, winner
    def playGameHuman(self):
        """
        This function implements the processes of the game of your own agent vs a human.
        output:
        bestMove(list of tuple): list of bestMove coordinates at each step
        gameBoards(list of 2d lists): list of game board positions at each move
        winner(int): 1 for maxPlayer is the winner, -1 for minPlayer is the winner, and 0 for tie.
        """
        #YOUR CODE HERE
        # Unimplemented stub.
        bestMove=[]
        gameBoards=[]
        winner=0
        return gameBoards, bestMove, winner
if __name__=="__main__":
    # Run one predefined-agent game (max plays first, plain minimax) and
    # report the outcome.
    game = ultimateTicTacToe()
    boards, moves, expanded, values, result = game.playGamePredifinedAgent(True, True)
    if result == 1:
        print("The winner is maxPlayer!!!")
    elif result == -1:
        print("The winner is minPlayer!!!")
    else:
        print("Tie. No winner:(")
|
[
"jasonwhwang@gmail.com"
] |
jasonwhwang@gmail.com
|
099428a52dc8ac12fbc8b9aabf2094baabd54358
|
34932f10f59b05b82efdd4144c58cb09226330bc
|
/redditCrawler/reddit.py
|
2c96eef4808f046680575b5b4442eab39f24f292
|
[] |
no_license
|
AkinoRito/Scrapy
|
be611b9e1e5cfc6c467e2ae89043753ddeae8817
|
476ce6d9ca5e621171076d142b79ed0a25b8d275
|
refs/heads/master
| 2020-04-12T01:51:58.278273
| 2019-04-22T02:53:20
| 2019-04-22T02:53:20
| 162,230,401
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,755
|
py
|
from selenium import webdriver
import time
# Subreddit front page to crawl.
URL = 'https://www.reddit.com/r/THEMATRIXAI'
# How many times to scroll to the page bottom to trigger lazy loading.
RELOAD_TIMES = 50
# Destination CSV; columns written are: user,time,content,kind.
OUTPUT_RESULT_PATH = 'C:\\Users\\gjy\\Desktop\\WORK\\crawler\\result000.csv'
def main():
    """
    1. initialize a webdriver
    2. load and reload the web page
    3. crawl data

    Scrolls the subreddit feed RELOAD_TIMES times, then scrapes two kinds of
    posts via hard-coded XPaths into OUTPUT_RESULT_PATH as CSV.
    :return:
    """
    browser = webdriver.Chrome()
    browser.get(URL)
    time.sleep(2)
    # Scroll to the bottom repeatedly so the infinite-scroll feed loads more posts.
    js = 'window.scrollTo(0, document.body.scrollHeight);'
    for _ in range(RELOAD_TIMES):
        browser.execute_script(js)
        time.sleep(8)
    f_result = open(OUTPUT_RESULT_PATH, 'w', encoding='utf8')
    f_result.write("user,time,content,kind\n")
    # Crawl shared/link posts (kind = 'shared'); the class string is a
    # generated reddit CSS class and may break when reddit redeploys.
    name_share = browser.find_elements_by_xpath('//*[@class="rpBJOHq2PR60pnwJlUyP0 mos4kc-0 hvBaPD"]'
                                                '/div/div/div/div[2]/article/div[1]/div[1]/div/div/div/a')
    time_share = browser.find_elements_by_xpath('//*[@class="rpBJOHq2PR60pnwJlUyP0 mos4kc-0 hvBaPD"]'
                                                '/div/div/div/div[2]/article/div[1]/div[1]/div/div/a')
    data_share = browser.find_elements_by_xpath('//*[@class="rpBJOHq2PR60pnwJlUyP0 mos4kc-0 hvBaPD"]'
                                                '/div/div/div/div[2]/article/div[1]/div[3]/a')
    print("分享内容个数:", len(name_share))
    for i in range(len(name_share)):
        f_result.write(
            name_share[i].get_attribute('text') + ',' + time_share[i].get_attribute('text') + ',"' +
            data_share[i].get_attribute('href') + '",shared\n')
    # Crawl posts published on reddit itself (kind = 'article').
    box = browser.find_elements_by_xpath('//*[@class="rpBJOHq2PR60pnwJlUyP0 mos4kc-0 hvBaPD"]/div')
    name_article = []
    time_article = []
    data_article = []
    for i in box:
        name_article.append(i.find_elements_by_xpath('.//div/div/div[2]/div[1]/div/div[1]/div/a'))
        time_article.append(i.find_elements_by_xpath('.//div/div/div[2]/div[1]/div/div[1]/a'))
        data_article.append(i.find_elements_by_xpath('.//div/div/div[2]/div[3]/div/div'))
    for i, j, k in zip(name_article, time_article, data_article):
        # write into result.csv
        if len(k) == 0: # these are subreddits with only a youtube video
            continue
        if len(i): # user
            f_result.write(i[0].get_attribute('text')+',')
        else:
            f_result.write('null,')
        if len(j): # time
            f_result.write(j[0].get_attribute('outerText') + ',"')
        else:
            f_result.write('null,"')
        f_result.write(k[0].get_attribute('outerText') + '",article\n')
    browser.close()
    f_result.close()
if __name__ == '__main__':
    main()
|
[
"850219887@qq.com"
] |
850219887@qq.com
|
261eda2a30079b27ec921e7697eff4cb976bf8c1
|
3e4e1f68baba766699792e8f8cef7f8bbf151688
|
/ex3.py
|
bc1e5a3bd98447229999c02d3f5089193d0bba5b
|
[] |
no_license
|
ziz9/LearnPyHW
|
200def3ab98f00d92e98f826a79f1103398af73b
|
ec9e7739f483ca9506eafe0226596c4d64cd7cf8
|
refs/heads/master
| 2021-01-22T17:52:25.142097
| 2017-10-20T07:33:29
| 2017-10-20T07:33:29
| 85,038,496
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 506
|
py
|
# -*- coding: utf-8 -*-
# Exercise 3 of "Learn Python the Hard Way": integer arithmetic and
# comparison operators, written with Python 2 print statements.
print "I will now count my chickens:"
# why is there no blank line printed here?
print "Hence",25+30/6
print "Roosters",100-25*3%4
print "Now I will count the eggs:"
print 3+2+1-5+4%2-1/4+6
print "Is it true that 3+2<5-7?"
print 3+2<5-7
print "What is 3+2?",3+2
print "What is 5-7?",5-7
print "Oh, that's why it's False."
print "How about some more."
print "Is it greater?", 5>-2 # once raised a SyntaxError: missing ","
print "Is it greater or equal?", 5>=-2
print "Is it less or equal?", 5<=-2
|
[
"zizhenyan9@163.com"
] |
zizhenyan9@163.com
|
d916af6b9d7865400b4724d52ec1fd6dd4af6830
|
233c8f07d93ab3d759327669c0fa27291bd6713a
|
/forms/loginform.py
|
2f4fd4de79018ffdd854563b5c1c0c6ec4185d73
|
[] |
no_license
|
Q1uLe/WEB_project
|
29e8087a06d92e06a50ff3a85b861cc5f56aa7d7
|
28b57e0ed85523c205bd9a84720d1e8bf021cdb0
|
refs/heads/master
| 2023-04-18T09:54:08.990818
| 2021-04-26T11:33:40
| 2021-04-26T11:33:40
| 352,029,999
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 461
|
py
|
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField
from wtforms.validators import DataRequired
class LoginForm(FlaskForm):
    """Sign-in form: username, password and a 'remember me' flag.

    WTForms derives each field's name from the class attribute it is
    assigned to ('username', 'password', ...), so no explicit name is needed.
    """
    # BUG FIX: the fields previously passed _name='...' to the unbound field;
    # WTForms passes _name itself when binding, so instantiating the form
    # raised "got multiple values for keyword argument '_name'".
    username = StringField('Логин', validators=[DataRequired()])
    password = PasswordField('Пароль', validators=[DataRequired()])
    remember_me = BooleanField('Запомнить меня')
    submit = SubmitField('Войти')
|
[
"abraahaam@yandex.ru"
] |
abraahaam@yandex.ru
|
5ffba38c4fd707487eff037c3eb11119b104b7a2
|
4273135a9c8fd46c47a6871506c02b98a37c5503
|
/example/python/lib_py/optparse_pylib_eg.py
|
6f129479321f532202f766db85eb527701950406
|
[] |
no_license
|
xuyuanxin/notes
|
f31cd6c8bce0357f0ac4114da7330901fce49b41
|
d8fed981a2096843a62bb4a40aa677168e11f88e
|
refs/heads/master
| 2022-05-06T22:54:42.621373
| 2022-04-23T07:26:00
| 2022-04-23T07:26:00
| 25,041,042
| 2
| 2
| null | 2022-04-22T23:36:23
| 2014-10-10T15:18:08
|
C
|
UTF-8
|
Python
| false
| false
| 1,458
|
py
|
from optparse import OptionParser
'''
<yourscript> --file=outfile -q
<yourscript> -f outfile --quiet
<yourscript> --quiet --file outfile
<yourscript> -q -foutfile
<yourscript> -qfoutfile
'''
def main():
    """Demonstrate optparse: define -f/--file, -v/--verbose and -q/--quiet,
    parse sys.argv, and echo what was recognised."""
    usage = "usage: %prog [options] arg"
    parser = OptionParser(usage)
    parser.add_option("-f", "--file", dest="filename",
                      help="read data from FILENAME")
    # -v and -q share one destination; the last flag given wins.
    parser.add_option("-v", "--verbose",
                      action="store_true", dest="verbose")
    parser.add_option("-q", "--quiet",
                      action="store_false", dest="verbose")
    (options, args) = parser.parse_args()
    print('(options, args):%s %s'%(options,args))
    if len(args) != 1:
        # The tutorial would abort here; deliberately left disabled.
        #parser.error("incorrect number of arguments")
        pass
    if options.verbose:
        # BUG FIX: this was a Python 2 print statement — a SyntaxError under
        # Python 3, even though the rest of the file uses print() calls.
        print("reading %s..." % options.filename)
    print('-------')
if __name__ == "__main__":
    main()
'''
parser = OptionParser()
parser.add_option("-f", "--file", dest="filename",
help="write report to FILE", metavar="FILE")
parser.add_option('--alexa-file', default='data/alexa_100k.csv', help='Alexa file to pull from. Default: %default')
parser.add_option("-q", "--quiet",
action="store_false", dest="verbose", default=True,
help="don't print status messages to stdout")
(options, args) = parser.parse_args()
print options, args
options.alexa_file
'''
|
[
"xxuyuanxin@163.com"
] |
xxuyuanxin@163.com
|
f2d6eec03504333314ac18b426f6d3c5c515b12c
|
f3534b4b94dafbf834ba3daa900fef70e4e482b9
|
/plot-sampling-choice.py
|
27d7a410e2a1dcddf08483aec2a0da5796beda73
|
[] |
no_license
|
lokhande-vishnu/cs861_ALIS_algo
|
bf098e8442fa56b03d79cd504a24b0da4325d7b3
|
dc5860cd1cc24961e55b24dcbe78291a5262f223
|
refs/heads/master
| 2020-03-15T14:53:37.963345
| 2018-05-14T18:24:54
| 2018-05-14T18:24:54
| 132,199,560
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,593
|
py
|
"""
Created on Fri May 4 00:33:14 2018
@author: pydi
"""
import numpy as np
import matplotlib.pyplot as plt

# 50 samples per class: negatives ~ N(-1,1), positives ~ N(1,1);
# 5 labelled points are drawn from each for training.
x1 = np.random.normal(-1,1,50)
x2 = np.random.normal(1,1,50)
lab1 = np.random.choice(x1,5) # Negative points
lab2 = np.random.choice(x2,5) # Positive points

# Perceptron-style updates of the 1-D threshold w on the labelled points.
w = -1
eta = 0.1
for _ in range(10):
    for item in lab1:
        if item*w*(-1) < 0:
            w = w + eta*item*(-1)
    for item in lab2:
        if item*w*(1) < 0:
            w = w + item*eta*(1)

# Scaled Gaussian curves of the two class densities for display.
r = np.arange(-7,7,0.01)
g1 = .05*(1/np.sqrt(2*np.pi))*np.exp(-(r+1)**2/2)
g2 = .05*(1/np.sqrt(2*np.pi))*np.exp(-(r-1)**2/2)

plt.figure(111)
x1_, = plt.plot(x1, np.zeros(x1.shape), 'rx', markersize=10)
x2_, = plt.plot(x2, np.zeros(x1.shape), 'bx', markersize=10)
lab1_, = plt.plot(lab1, np.zeros(lab1.shape), 'r8', markersize=8)
lab2_, = plt.plot(lab2, np.zeros(lab2.shape), 'b8', markersize=8)
g1_, = plt.plot(r, g1, 'r--', label = '- pt density')
g2_, = plt.plot(r, g2, 'b--', label = '+ pt density')
plt.axvline(x = w, color='k', label= 'trained classifier')
plt.text(w, 0.02, 'trained classifier', rotation=90)

# Sampling weights proportional to 1 + |w*x| (margin size), normalised.
l1 = 1+np.abs(w*x1)
l2 = 1+np.abs(w*x2)
xall = np.concatenate((x1,x2))
lall = np.concatenate((l1,l2))
lall = lall/np.sum(lall)
#plt.plot(xall, lall, 'g*')
# One vertical green stick per point; g_ keeps the last handle for the legend.
for j in range(len(xall)):
    g_, = plt.plot([xall[j], xall[j]], [0.0005, lall[j]], 'g-', label='sampling distribution')

plt.ylabel('Probability Distribution')
plt.xlabel('Data items')
# BUG FIX: was 'oplt.legend(...)' — a NameError that aborted the script
# before plt.show() could run.
plt.legend(handles=[g_, g1_, g2_])
plt.show()
|
[
"lokhande@cs.wisc.edu"
] |
lokhande@cs.wisc.edu
|
d634e31486f5044b31ab168805511a33ded6ef6a
|
eacfc1c0b2acd991ec2cc7021664d8e79c9e58f6
|
/ccpnmr2.4/python/ccp/format/marvin/generalIO.py
|
21409931818e74a5fd154a4652c790008a1b86d2
|
[] |
no_license
|
edbrooksbank/ccpnmr2.4
|
cfecb0896dcf8978d796e6327f7e05a3f233a921
|
f279ca9bb2d972b1ce075dad5fcc16e6f4a9496c
|
refs/heads/master
| 2021-06-30T22:29:44.043951
| 2019-03-20T15:01:09
| 2019-03-20T15:01:09
| 176,757,815
| 0
| 1
| null | 2020-07-24T14:40:26
| 2019-03-20T14:59:23
|
HTML
|
UTF-8
|
Python
| false
| false
| 2,522
|
py
|
"""
======================COPYRIGHT/LICENSE START==========================
generalIO.py: General I/O information for marvin files
Copyright (C) 2007 Wim Vranken (European Bioinformatics Institute)
=======================================================================
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
A copy of this license can be found in ../../../../license/LGPL.license
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
======================COPYRIGHT/LICENSE END============================
for further information, please contact :
- CCPN website (http://www.ccpn.ac.uk/)
- PDBe website (http://www.ebi.ac.uk/pdbe/)
- contact Wim Vranken (wim@ebi.ac.uk)
=======================================================================
If you are using this software for academic purposes, we suggest
quoting the following references:
===========================REFERENCE START=============================
R. Fogh, J. Ionides, E. Ulrich, W. Boucher, W. Vranken, J.P. Linge, M.
Habeck, W. Rieping, T.N. Bhat, J. Westbrook, K. Henrick, G. Gilliland,
H. Berman, J. Thornton, M. Nilges, J. Markley and E. Laue (2002). The
CCPN project: An interim report on a data model for the NMR community
(Progress report). Nature Struct. Biol. 9, 416-418.
Wim F. Vranken, Wayne Boucher, Tim J. Stevens, Rasmus
H. Fogh, Anne Pajon, Miguel Llinas, Eldon L. Ulrich, John L. Markley, John
Ionides and Ernest D. Laue (2005). The CCPN Data Model for NMR Spectroscopy:
Development of a Software Pipeline. Proteins 59, 687 - 696.
===========================REFERENCE END===============================
"""
from ccp.format.general.formatIO import FormatFile
from ccp.format.general.Constants import defaultMolCode
#####################
# Class definitions #
#####################
class MarvinGenericFile(FormatFile):
    """Base class for marvin-format files: stamps the instance with the
    format name and the package-wide default molecule code."""

    def setGeneric(self):
        # Record the fallback molecular code and the format identity.
        self.defaultMolCode = defaultMolCode
        self.format = 'marvin'
|
[
"ejb66@le.ac.uk"
] |
ejb66@le.ac.uk
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.