blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7d0eac6bc6769a63f609d726e612586ed47b6af8
|
e1ae535d8613aae44e8f9eaa4daf50c1e63665b7
|
/multimedia/south_migrations/0026_auto__chg_field_remotestorage_media.py
|
e4f8b05c6dae4836b6317150e40ea7eda035d2ed
|
[] |
no_license
|
teury/django-multimedia
|
48b8fba9abc101286990b1306d85967bd197f08e
|
4ddd5e6d9f4f680e2f4f68cc3616ced8f0fc2a43
|
refs/heads/master
| 2021-01-16T20:50:24.573686
| 2015-04-23T21:22:38
| 2015-04-23T21:22:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,388
|
py
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: relax ``RemoteStorage.media`` to be nullable.

    After this migration, deleting a ``Media`` row leaves its remote storages
    in place with ``media_id = NULL`` (``on_delete=SET_NULL``) instead of
    cascading or failing.
    """

    def forwards(self, orm):
        # Changing field 'RemoteStorage.media'
        db.alter_column(u'multimedia_remotestorage', 'media_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['multimedia.Media'], null=True, on_delete=models.SET_NULL))

    def backwards(self, orm):
        # User chose to not deal with backwards NULL issues for 'RemoteStorage.media'
        # Reversing would have to invent media_id values for NULL rows, so the
        # migration is deliberately irreversible.
        raise RuntimeError("Cannot reverse this migration. 'RemoteStorage.media' and its values cannot be restored.")

        # The following code is provided here to aid in writing a correct migration
        # (intentionally unreachable -- South emits this hint after the raise).
        # Changing field 'RemoteStorage.media'
        db.alter_column(u'multimedia_remotestorage', 'media_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['multimedia.Media']))

    # Frozen ORM snapshot used by South to build the ``orm`` argument above.
    # Auto-generated; do not edit by hand.
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'multimedia.encodeprofile': {
            'Meta': {'object_name': 'EncodeProfile'},
            'command': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
            'container': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'file_type': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        u'multimedia.media': {
            'Meta': {'ordering': "(u'-created',)", 'object_name': 'Media'},
            'created': ('django.db.models.fields.DateTimeField', [], {}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
            'profiles': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['multimedia.EncodeProfile']", 'symmetrical': 'False'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        u'multimedia.remotestorage': {
            'Meta': {'object_name': 'RemoteStorage'},
            'content_hash': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'created': ('django.db.models.fields.DateTimeField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'media': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['multimedia.Media']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {}),
            'profile': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['multimedia.EncodeProfile']", 'on_delete': 'models.PROTECT'})
        }
    }

    complete_apps = ['multimedia']
|
[
"jason.bittel@gmail.com"
] |
jason.bittel@gmail.com
|
c6ecf3c59e8d315c1650c67532864af71b386c05
|
4e8b37ca121be19cd3b4e73a6592be2659d8134c
|
/backend/Techfesia2019/accounts/migrations/0005_auto_20190701_1708.py
|
a7113d24504a420a0d91b930fb768ac3673981f3
|
[
"MIT"
] |
permissive
|
masterashu/Techfesia2019
|
365b9b8dc1cb0bc6b613c72632e8b7a2a2a70905
|
8fd82c4867c8d870b82a936fc0f9e80f11ae03e7
|
refs/heads/backend-event-registrations
| 2020-06-10T20:58:40.850415
| 2019-07-27T23:00:21
| 2019-07-27T23:00:21
| 193,744,800
| 1
| 1
|
MIT
| 2019-06-29T17:12:31
| 2019-06-25T16:29:12
|
Python
|
UTF-8
|
Python
| false
| false
| 466
|
py
|
# Generated by Django 2.2.2 on 2019-07-01 11:38
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a default value and a uniqueness constraint to ``Institute.name``."""

    dependencies = [
        ('accounts', '0004_auto_20190701_0956'),
    ]

    operations = [
        migrations.AlterField(
            model_name='institute',
            name='name',
            # Institute names must now be unique; new rows default to IIIT Sri City.
            field=models.CharField(default='Indian Institute of Information Technology, Sri City', max_length=200, unique=True),
        ),
    ]
|
[
"masterashu@live.in"
] |
masterashu@live.in
|
a9d2eeab18066cbc76789aba31dd51329d4f3780
|
9f0b9a8fe27336b8a231a33c6f693ed019a61b6e
|
/blacklinetest.py
|
f6eb1fa445e64a1ab1daa8cf7cc3bd44fcadc93b
|
[] |
no_license
|
Duong-NVH/tool-set
|
e2647cf74fa085eab42fe3f19c852634629e956e
|
e7c5f7f4522e75eefe74e808a07ecf6575c4ebf5
|
refs/heads/main
| 2023-06-15T07:37:30.783287
| 2021-07-09T15:58:12
| 2021-07-09T15:58:12
| 382,987,402
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 517
|
py
|
import cv2
import numpy as np

# Detect strongly-supported straight lines in blacklinetest.jpg with the
# standard Hough transform and draw them in red on the output image.
img = cv2.imread('blacklinetest.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray, 50, 150, apertureSize=3)
# Accumulator threshold 500 keeps only lines with very strong edge support.
lines = cv2.HoughLines(edges, 1, np.pi/180, 500)
# Bug fix: cv2.HoughLines returns None when nothing passes the threshold, and
# in OpenCV 3+ its result has shape (N, 1, 2), so the original
# `for rho, theta in lines[0]` only drew the first detected line.
if lines is not None:
    for detected in lines:
        rho, theta = detected[0]
        a = np.cos(theta)
        b = np.sin(theta)
        x0 = a*rho
        y0 = b*rho
        # Extend the (rho, theta) normal form 1000 px in both directions to
        # obtain two drawable endpoints.
        x1 = int(x0 + 1000*(-b))
        y1 = int(y0 + 1000*(a))
        x2 = int(x0 - 1000*(-b))
        y2 = int(y0 - 1000*(a))
        cv2.line(img, (x1, y1), (x2, y2), (0, 0, 255), 2)
cv2.imwrite('houghlines3.jpg', img)
|
[
"you@example.com"
] |
you@example.com
|
d7aab2532f25c287a63c8bd8d282163103684f29
|
d7567ee75e48bd7872a1c332d471ff3ce7433cb9
|
/checkout/urls.py
|
233bfb99df176d4ab47c4bae44affd20f8155e9c
|
[] |
no_license
|
sarahbarron/ecommerce
|
30cd0ff26afa5ec9031165b63ecde8c0f7f6086f
|
aba5370fd731e7ec9e677041504f6c3457b0d405
|
refs/heads/master
| 2020-03-17T21:10:56.385918
| 2020-01-17T18:35:28
| 2020-01-17T18:35:28
| 133,947,336
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 131
|
py
|
from django.conf.urls import url
from .views import checkout

# Route the app root (empty path relative to the include() prefix) to the
# checkout view; reversible as 'checkout'.
urlpatterns = [
    url(r'^$', checkout, name='checkout'),
]
|
[
"sarahflavin@yahoo.com"
] |
sarahflavin@yahoo.com
|
ab932c024897c581d9bb5dd95eef2ee759d421c2
|
bac5ecb5eef06dfe76b9b7bff80faee7485c67dd
|
/.history/django_vuejs_tutorial/django_vuejs/dataiku/models_20200829125121.py
|
c7e0a2d229648bf8a2326333ab23d5a72731658d
|
[] |
no_license
|
MChrys/dataiku
|
fb1e48401d544cbcc5a80a0a27668dc9d2d196e5
|
6091b24f565224260a89246e29c0a1cbb72f58ed
|
refs/heads/master
| 2022-12-16T11:06:13.896643
| 2020-09-12T19:03:51
| 2020-09-12T19:03:51
| 293,287,620
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,147
|
py
|
from django.db import models
from django.utils import timezone
class Task(models.Model):
    '''
    A task that can be applied to a specific account.
    '''
    # Human-readable task name doubles as the primary key.
    name = models.CharField(max_length=60, primary_key=True)
    description = models.CharField(max_length=510, null=True, blank=True)
    # Optional parent task; deleting the parent orphans the children (SET_NULL).
    supertask = models.ForeignKey('self',null=True, blank=True, on_delete=models.SET_NULL)
class Dataiku_account(models.Model):
    """A Dataiku login the automation can use; identified by email."""
    # NOTE(review): 'avaible' is a typo for 'available', but these strings are
    # stored choice values -- renaming them needs a data migration.
    STATUS = (
        ('in operation', 'in operation'),
        ('avaible', 'avaible')
    )
    email = models.CharField(max_length=60, primary_key=True)
    password = models.CharField(max_length=255, null=True, blank=True)
    #task = models.CharField(max_length=255, null=True, blank=True)
    status = models.CharField(max_length=255, null=True, blank=True, choices = STATUS)

    def __str__(self):
        return self.email
class Operation(models.Model):
    '''
    A running Task like : validate this course or take this QCM
    '''
    # Stamped once on first save (see save()); editable=False hides it from forms.
    creation = models.DateTimeField( editable = False)
    STATUS = (
        ('pending', 'pending'),
        ('running', 'running'),
        ('done', 'done')
    )
    task = models.OneToOneField(Task, null=True, blank=True,on_delete=models.SET_NULL)
    account = models.ForeignKey(Dataiku_account, on_delete=models.CASCADE)
    # NOTE(review): field name 'statut' (French) is kept to avoid a schema change.
    statut = models.CharField(max_length=255, null=True, blank=True, choices = STATUS)

    def save(self, *args, **kwargs):
        """Stamp ``creation`` on first save, then delegate to Model.save.

        Bug fix: the original called ``super(User, self).save(...)`` but no
        ``User`` name exists in this module, so every save raised NameError.
        """
        if not self.id:
            self.creation = timezone.now()
        return super(Operation, self).save(*args, **kwargs)
class QCM(models.Model):
    """A multiple-choice quiz inside a course of a learning path."""
    LearningPathUrl = models.CharField(max_length=255, null=True, blank=True)
    LearningPathName = models.CharField(max_length=255, null=True, blank=True)
    CourseUrl = models.CharField(max_length=255, null=True, blank=True)
    CourseName = models.CharField(max_length=255, null=True, blank=True)
    QcmUrl = models.CharField(max_length=255, null=True, blank=True)
    QcmName = models.CharField(max_length=255, null=True, blank=True)
    # NOTE(review): 'Lenght' is a typo for 'Length'; renaming requires a
    # migration, so it is documented rather than changed.
    Lenght = models.IntegerField(default =0)
    Verif = models.IntegerField(default =0)
    status = models.BooleanField(default = False)

    def __str__(self):
        return "{}_{}_{}".format(self.LearningPathName, self.CourseName,self.QcmName)
class Session(models.Model):
    """A timed answering session for one Dataiku account."""
    # NOTE(review): STATUS is declared but no field uses it -- confirm intent.
    STATUS = (
        ('running','running'),
        ('finish','finish')
    )
    email = models.ForeignKey(Dataiku_account , on_delete=models.CASCADE)
    # Stamped once on first save (see save()); hidden from forms.
    start = models.DateTimeField(editable =False)
    countdown = models.CharField(max_length=10, blank=True, null=True, default= '59:59')
    score = models.IntegerField(default=0)
    # NOTE(review): 'lenght' is a typo for 'length'; kept to avoid a schema change.
    lenght = models.IntegerField(default=0)

    def save(self, *args, **kwargs):
        ''' On save, set the start timestamp the first time only. '''
        # Bug fix: the original delegated to ``super(User, self)`` but ``User``
        # is not defined in this module, so saving raised NameError.
        if not self.id:
            self.start = timezone.now()
        return super(Session, self).save(*args, **kwargs)
# Create your models here.
class Question(models.Model):
    """A single QCM question, keyed by its text."""
    STATUS = (
        ('pending', 'pending'),
        ('check', 'check')
    )
    CHOICES_TYPE = (
        ('checkbox', 'checkbox'),
        ('radio', 'radio')
    )
    text = models.CharField(max_length=255, primary_key=True)
    #session = models.ForeignKey(Session, null=True, blank=True,on_delete = models.SET_NULL)
    status = models.CharField(max_length=255, null=True, blank=True, choices = STATUS)
    # 'checkbox' allows several answers, 'radio' exactly one.
    choice_type = models.CharField(max_length=255, null=True, blank=True, default= "radio" ,choices = CHOICES_TYPE)
    max_choices = models.IntegerField(default = 0)
    cursor = models.IntegerField(default = 1)
    # cursor is the number of answer currently explored
    qcm_link = models.ForeignKey(QCM, null=True, blank=True, on_delete=models.CASCADE)

    def __str__(self):
        return self.text
class Run(models.Model):
    '''
    A Run is a try
    '''
    # NOTE(review): STATUS is declared but unused -- `status` below is a plain
    # BooleanField without choices.
    STATUS = (
        (True, 'True'),
        (False, 'False')
    )
    id = models.AutoField(primary_key=True)
    #creation = models.DateTimeField(editable =False)
    question_link = models.ForeignKey(Question, null=True, blank=True, on_delete=models.CASCADE)
    session_link = models.ForeignKey(Session, null=True, blank=True, on_delete=models.CASCADE)
    status = models.BooleanField(default = False)
class Posibility(models.Model):
    """One of up to four answer options (rank 1-4) for a question."""
    CHOICES = (
        (1, '1'),
        (2, '2'),
        (3, '3'),
        (4, '4'),
    )
    rank = models.IntegerField( null=True, default= 1, blank=True, choices = CHOICES)
    question_link = models.ForeignKey(Question, null=True, blank=True,on_delete=models.CASCADE)
    text = models.CharField(max_length=255, null=True, blank=True)
    #rank = models.ForeignKey(Answer, null=True, blank=True,on_delete= models.SET_NULL)

    def __str__(self):
        return self.text
class Answer(models.Model):
    """The possibility chosen during one Run of a question."""
    #choice = models.IntegerField( null=True, default= 1, blank=True, choices = CHOICES)
    choice = models.ForeignKey(Posibility,to_field='rank',blank=True,null=True,on_delete= models.SET_NULL)
    connected_run = models.ForeignKey(Run,to_field='id',blank=True,null=True,on_delete= models.SET_NULL)

    def __str__(self):
        # Bug fix: __str__ must return a str; the original returned the related
        # Posibility instance (or None), which raises TypeError in Django.
        return str(self.choice)
|
[
"cbeltran@umanis.com"
] |
cbeltran@umanis.com
|
18b985fd2a25b161ab12d7f4f4e09fc83c30cc2e
|
3b21cbe5320137a3d8f7da40558294081211f63f
|
/Chapter04/AutoencMnist.py
|
daebd29ec15d7b88a838e6b5aa4a4d8016f69927
|
[
"MIT"
] |
permissive
|
Evelynatrocks/Python-Machine-Learning-Cookbook-Second-Edition
|
d06812bba0a32a9bd6e5e8d788769a07d28084cd
|
99d8b799dbfe1d9a82f0bcc3648aaeb147b7298f
|
refs/heads/master
| 2023-04-06T20:23:05.384943
| 2021-01-18T12:06:36
| 2021-01-18T12:06:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,181
|
py
|
from keras.datasets import mnist

# ---------------------------------------------------------------------------
# Load MNIST and inspect shapes and label distributions.
# ---------------------------------------------------------------------------
(XTrain, YTrain), (XTest, YTest) = mnist.load_data()
print('XTrain shape = ',XTrain.shape)
print('XTest shape = ',XTest.shape)
print('YTrain shape = ',YTrain.shape)
print('YTest shape = ',YTest.shape)
import numpy as np
print('YTrain values = ',np.unique(YTrain))
print('YTest values = ',np.unique(YTest))
unique, counts = np.unique(YTrain, return_counts=True)
print('YTrain distribution = ',dict(zip(unique, counts)))
unique, counts = np.unique(YTest, return_counts=True)
# Bug fix: this prints the *test* label distribution; the original label
# said "YTrain distribution" for both.
print('YTest distribution = ',dict(zip(unique, counts)))
import matplotlib.pyplot as plt
# Side-by-side histograms of the train/test class balance.
plt.figure(1)
plt.subplot(121)
plt.hist(YTrain, alpha=0.8, ec='black')
plt.xlabel("Classes")
plt.ylabel("Number of occurrences")
plt.title("YTrain data")
plt.subplot(122)
plt.hist(YTest, alpha=0.8, ec='black')
plt.xlabel("Classes")
plt.ylabel("Number of occurrences")
plt.title("YTest data")
plt.show()
# Scale pixels to [0, 1] and flatten 28x28 images to 784-dim vectors.
XTrain = XTrain.astype('float32') / 255
XTest = XTest.astype('float32') / 255
XTrain = XTrain.reshape((len(XTrain), np.prod(XTrain.shape[1:])))
XTest = XTest.reshape((len(XTest), np.prod(XTest.shape[1:])))
from keras.layers import Input
from keras.layers import Dense
from keras.models import Model
# Single-hidden-layer autoencoder: 784 -> 32 -> 784.
InputModel = Input(shape=(784,))
EncodedLayer = Dense(32, activation='relu')(InputModel)
DecodedLayer = Dense(784, activation='sigmoid')(EncodedLayer)
AutoencoderModel = Model(InputModel, DecodedLayer)
AutoencoderModel.summary()
AutoencoderModel.compile(optimizer='adadelta', loss='binary_crossentropy')
# Train to reconstruct the inputs themselves (targets == inputs).
history = AutoencoderModel.fit(XTrain, XTrain,
                               batch_size=256,
                               epochs=100,
                               shuffle=True,
                               validation_data=(XTest, XTest))
DecodedDigits = AutoencoderModel.predict(XTest)
# Show n originals (top row) above their reconstructions (bottom row).
n=5
plt.figure(figsize=(20, 4))
for i in range(n):
    ax = plt.subplot(2, n, i + 1)
    plt.imshow(XTest[i+10].reshape(28, 28))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
    ax = plt.subplot(2, n, i + 1 + n)
    plt.imshow(DecodedDigits[i+10].reshape(28, 28))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
plt.show()
|
[
"joecasillas001@gmail.com"
] |
joecasillas001@gmail.com
|
be0eb741b4aaaad5085131454dec219bdd1c93dd
|
971e0efcc68b8f7cfb1040c38008426f7bcf9d2e
|
/tests/artificial/transf_Anscombe/trend_LinearTrend/cycle_30/ar_/test_artificial_1024_Anscombe_LinearTrend_30__100.py
|
70d9b6daa1932fc44ee8f23227fa9317aea8fd0d
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
antoinecarme/pyaf
|
a105d172c2e7544f8d580d75f28b751351dd83b6
|
b12db77cb3fa9292e774b2b33db8ce732647c35e
|
refs/heads/master
| 2023-09-01T09:30:59.967219
| 2023-07-28T20:15:53
| 2023-07-28T20:15:53
| 70,790,978
| 457
| 77
|
BSD-3-Clause
| 2023-03-08T21:45:40
| 2016-10-13T09:30:30
|
Python
|
UTF-8
|
Python
| false
| false
| 268
|
py
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art

# Generated benchmark case: 1024 daily points, linear trend, 30-step cycle,
# Anscombe transform, no noise (sigma = 0), 100 exogenous series, AR order 0.
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "LinearTrend", cycle_length = 30, transform = "Anscombe", sigma = 0.0, exog_count = 100, ar_order = 0);
|
[
"antoine.carme@laposte.net"
] |
antoine.carme@laposte.net
|
e333c381e106259eee7a3f4e11f26674dd3a3594
|
30a8b69bd2e0a3f3c2c1c88fb3bd8a28e6fc4cd0
|
/Part1/auth_foursquare.py
|
dc09d963b40958ce2c5e3b9030a232e3dd9ca643
|
[] |
no_license
|
llord1/Mining-Georeferenced-Data
|
d49108f443922f02b90431ad7a9626ea17fd0554
|
c71f2e151ccfc4a1a9c07b5fcf4e95b7f7ba70e9
|
refs/heads/master
| 2021-05-30T13:27:57.663015
| 2015-12-29T09:10:08
| 2015-12-29T09:10:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 283
|
py
|
#!/usr/bin/env python

import foursquare
# Local module holding per-app credentials keyed by app name -- presumably
# kept out of version control; verify before committing changes here.
from foursquare_accounts import accounts

# Build an authenticated Foursquare client from the "tutorial" app's
# stored credentials.
app = accounts["tutorial"]
client = foursquare.Foursquare(client_id=app["client_id"],
                               client_secret=app["client_secret"])
client.set_access_token(app["access_token"])
|
[
"bgoncalves@gmail.com"
] |
bgoncalves@gmail.com
|
c35024eb1eed9b0da1bdde17899977fd5b9b5c96
|
0201ac814d825cac1030dfe1ccdb7ef1657c205b
|
/__init__.py
|
a403709aa7de47dca868813496d90679f83afbc3
|
[
"BSD-3-Clause"
] |
permissive
|
karttur/geoimagine03-timeseries
|
c99be449dccaab767d470cfaa2b71d9dae339fba
|
aa8e1642fd4a8bc196ad6fce9f90b80066d54dac
|
refs/heads/main
| 2023-08-22T14:12:50.791746
| 2021-10-20T10:54:37
| 2021-10-20T10:54:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 361
|
py
|
"""
timeseries
==========================================
Package belonging to Karttur´s GeoImagine Framework.
Author
------
Thomas Gumbricht (thomas.gumbricht@karttur.com)
"""
from .version import __version__, VERSION, metadataD
from .timeseries import ProcessTimeSeries
from .numbautil import TimeSeriesNumba
#from .tsgraph import ProcessTimeSeriesGraph
|
[
"thomas.gumbricht@gmail.com"
] |
thomas.gumbricht@gmail.com
|
0eb944d3d4b625dd58953dcd5ad39efa5bcaeaa1
|
9c14bd53c8629262b1310962c1663a3c503ba3a0
|
/projects/golem/tests/project/add_directory_to_pages.py
|
7b7d283d6d17e02f6e630f4b7d7aad6a000fea95
|
[] |
no_license
|
ShubhamAnand/golem-demo
|
b083d44b5d2d5db79eae96aa5bb1f3307272d64b
|
a40ced5500b3bfdb54351393eeb8ccba19a50564
|
refs/heads/master
| 2021-07-16T00:44:57.663282
| 2017-10-22T22:56:25
| 2017-10-22T22:56:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 521
|
py
|
description = 'Verify that the user can add a directory in the pages section by appending \'\\\' at the end'

# Page objects imported by the Golem runner before the test executes; their
# names (login, index, project) and the built-in actions used below
# (navigate, store, random, close) are injected into this module's namespace.
pages = ['login',
         'index',
         'project']

def setup(data):
    # Log into the Golem admin UI and open (or create) the 'test' project.
    navigate(data.env.url)
    login.do_login('admin', 'admin')
    index.create_access_project('test')

def test(data):
    # Add a randomly named page directory and verify it shows up.
    store('directory_name', random('ccccc'))
    project.add_page_directory(data.directory_name)
    project.verify_page_directory_exists(data.directory_name)

def teardown(data):
    close()
|
[
"feo.luciano@gmail.com"
] |
feo.luciano@gmail.com
|
4034bde7a9e06c5d7487997a7acb9e10b85cca2b
|
0f1001169c4f229c253a6f1dc1c9aff51c797cca
|
/docs/markdown_to_html.py
|
ffacda661ea31a8286a001a77d5178f08b9a1fd3
|
[
"Apache-2.0"
] |
permissive
|
alencon/dash-bootstrap-components
|
f40e360787c96a1d9f7827cf042872b2f9cffcac
|
4f39856c13f66730512c57ed6dc0a819e8629293
|
refs/heads/master
| 2023-01-22T13:07:05.880865
| 2020-12-03T21:25:50
| 2020-12-03T21:25:50
| 318,998,227
| 1
| 0
|
Apache-2.0
| 2020-12-06T09:42:13
| 2020-12-06T09:42:13
| null |
UTF-8
|
Python
| false
| false
| 1,541
|
py
|
from pathlib import Path
import markdown
from markdown.extensions.fenced_code import FencedBlockPreprocessor
# highlightJS expects the class "language-*" but markdown default is "*"
# (monkey-patch the fenced-code preprocessor's emitted class attribute).
FencedBlockPreprocessor.LANG_TAG = ' class="language-%s"'

# Source markdown lives under ./content; rendered Jinja templates are written
# to ./templates/generated.
CONTENT = Path(__file__).parent / "content"
DEST = Path(__file__).parent / "templates" / "generated"

# Jinja skeletons; <TITLE> and <CONTENT> are plain-text placeholders replaced
# by template_from_markdown().
DOCS_HTML_TEMPLATE = """{% extends "docs.html" %}
{% block title %}<title><TITLE></title>{% endblock %}
{% block content %}<CONTENT>{% endblock %}
"""
CHANGELOG_HTML_TEMPLATE = """{% extends "changelog.html" %}
{% block title %}<title><TITLE></title>{% endblock %}
{% block content %}<CONTENT>{% endblock %}
"""
def convert_all_markdown_files():
    """Render every markdown file under CONTENT into an HTML template in DEST.

    docs/*.md are rendered with the docs template and a " - dbc docs" title
    suffix; top-level *.md (the changelog) use the changelog template.
    """
    for path in CONTENT.glob("docs/*.md"):
        template = template_from_markdown(path, title_suffix=" - dbc docs")
        with open(DEST / "docs" / path.name.replace(".md", ".html"), "w") as f:
            f.write(template)
    for path in CONTENT.glob("*.md"):
        template = template_from_markdown(
            path, template=CHANGELOG_HTML_TEMPLATE
        )
        with open(DEST / path.name.replace(".md", ".html"), "w") as f:
            f.write(template)
def template_from_markdown(path, title_suffix="", template=DOCS_HTML_TEMPLATE):
    """Render one markdown file into an HTML page template.

    Converts *path* with the fenced-code and metadata extensions, substitutes
    the converted body for <CONTENT> and the page's ``title`` metadata (plus
    *title_suffix*) for <TITLE>.

    Bug fix: the original ignored *title_suffix* and always appended
    " - dbc docs", so changelog pages got the docs suffix too.
    """
    md = markdown.Markdown(extensions=["fenced_code", "meta"])
    text = path.read_text()
    template = template.replace("<CONTENT>", md.convert(text))
    return template.replace("<TITLE>", f"{md.Meta['title'][0]}{title_suffix}")
if __name__ == "__main__":
convert_all_markdown_files()
|
[
"tomcbegley@gmail.com"
] |
tomcbegley@gmail.com
|
2400de35f3a6c6902ae173e097d54b31040a551a
|
2cbf3aaad62f4922d827af658fb5dbb7ac651bef
|
/teledusite/teledu/models/conceptAttribute.py
|
fc12d964e90614a6ff7813077017d177a3c7fecb
|
[] |
no_license
|
tctimmeh/teledu
|
0266240aa864cd2eed75857e66eaeb8270f44c1a
|
04135ffb04f397f29152ca48f868a957b18d504a
|
refs/heads/master
| 2021-01-23T08:52:32.817693
| 2013-10-29T01:34:41
| 2013-10-29T01:34:41
| 2,566,893
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 674
|
py
|
from django.db import models
from concept import Concept
from attribute import Attribute
class ConceptAttribute(Attribute):
  """An Attribute attached to a Concept; (concept, name) pairs are unique."""
  concept = models.ForeignKey(Concept, related_name = 'attributes')

  class Meta:
    app_label = 'teledu'
    unique_together = (('concept', 'name'))

  def __unicode__(self):
    return '%s - %s' % (self.concept.name, self.name)

  def conceptName(self):
    # Convenience accessor (used e.g. by admin list displays -- verify callers).
    return self.concept.name

  def gameSystem(self):
    return self.concept.gameSystem

  def getAttributeValuesForInstance(self, instance):
    """Return the stored values of this attribute for one concept instance."""
    # Imported here to avoid a circular import at module load time.
    from conceptAttributeValue import ConceptAttributeValue
    return ConceptAttributeValue.objects.filter(attribute = self, instance = instance)
|
[
"tctimmeh@gmail.com"
] |
tctimmeh@gmail.com
|
a45dacabb65a8b878d1cb07374fde8bc5ac07d6d
|
f305f84ea6f721c2391300f0a60e21d2ce14f2a5
|
/7_graph/bfs求无权图的最短路径/广义的bfs/488. 祖玛游戏-bfs剪枝.py
|
12413155d1f8a0da0d66c30102d92f4f104f18a7
|
[] |
no_license
|
981377660LMT/algorithm-study
|
f2ada3e6959338ae1bc21934a84f7314a8ecff82
|
7e79e26bb8f641868561b186e34c1127ed63c9e0
|
refs/heads/master
| 2023-09-01T18:26:16.525579
| 2023-09-01T12:21:58
| 2023-09-01T12:21:58
| 385,861,235
| 225
| 24
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,038
|
py
|
# 1 <= board.length <= 16
# 1 <= hand.length <= 5
from collections import deque
from functools import lru_cache
import re
# Why breadth-first search?
# We only need the plan with the fewest turns, so BFS can return as soon as it
# reaches a state where the board is fully cleared, without exploring plans
# that would need more turns.
class Solution:
    def findMinStep(self, board: str, hand: str) -> int:
        """Return the minimum number of hand balls needed to clear the board, or -1."""

        @lru_cache(None)
        def collapse(state: str) -> str:
            """Repeatedly delete every run of three or more identical balls."""
            removed = 1
            while removed:
                state, removed = re.subn(r'(\w)\1{2,}', '', state)
            return state

        # Sort the hand so that permutations of the same multiset of balls
        # collapse into one visited state.
        start_hand = ''.join(sorted(hand))
        queue = deque([(board, start_hand, 0)])
        seen = {(board, start_hand)}
        while queue:
            cur_board, cur_hand, steps = queue.popleft()
            if not cur_board:
                return steps
            for pos in range(len(cur_board)):
                for k in range(len(cur_hand)):
                    # Prune: when the hand ball cur_hand[k] and the board
                    # ball(s) around insertion point `pos` are all pairwise
                    # different, the insertion cannot trigger a removal chain.
                    neighbors = (
                        [cur_board[pos - 1], cur_board[pos], cur_hand[k]]
                        if pos
                        else [cur_board[pos], cur_hand[k]]
                    )
                    if len(set(neighbors)) < len(neighbors):
                        next_board = collapse(cur_board[:pos] + cur_hand[k] + cur_board[pos:])
                        next_hand = cur_hand[:k] + cur_hand[k + 1:]
                        if (next_board, next_hand) not in seen:
                            seen.add((next_board, next_hand))
                            queue.append((next_board, next_hand, steps + 1))
        return -1
# Smoke-test the two LeetCode examples (expected output: -1 and 2).
print(Solution().findMinStep(board="WRRBBW", hand="RB"))
print(Solution().findMinStep(board="WWRRBBWW", hand="WRBRW"))
# re.subn returns a tuple: (new_string, number_of_substitutions_made).
|
[
"lmt2818088@gmail.com"
] |
lmt2818088@gmail.com
|
c560a98412f1f79c8b28518349b9281f419d3cd1
|
5f313d8fce26a8ecfff8817ff566b7e1810fcba7
|
/timethings.py
|
4d68c2cbbbfce64ba5da5943421cca52b094884d
|
[] |
no_license
|
luispedro/mahotas-paper
|
cd2769a264149cac74ce8c694ca4f02e3f4a6c93
|
698f2a8640feba4e285318e2cd866db3705ec2c3
|
refs/heads/master
| 2020-03-30T16:26:20.362126
| 2013-08-26T09:03:17
| 2013-08-26T09:03:17
| 4,877,058
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,733
|
py
|
import skimage.morphology
import skimage.filter
import skimage.feature
import numpy as np
import timeit
import mahotas
import cv2
from os import path
# Benchmark fixtures: load the demo image shipped with mahotas and derive the
# inputs (grayscale, binary, markers, structuring elements, filter kernel)
# that the timed statements below reference by name.
luispedro_image = path.join(
        path.dirname(mahotas.__file__),
        'demos',
        'data',
        'luispedro.jpg')
f = mahotas.imread(luispedro_image, as_grey=True)
# Two seed markers for the watershed benchmarks.
markers = np.zeros_like(f)
markers[100,100] = 1
markers[200,200] = 2
f = f.astype(np.uint8)
markers = markers.astype(int)
# Binary threshold via Otsu; both bool and uint8 variants are needed because
# the libraries under test disagree on accepted dtypes.
otsu = mahotas.otsu(f.astype(np.uint8))
fbin = f > otsu
fbin8 = fbin.astype(np.uint8)
# Diagonal 3x3 structuring element, again in bool and uint8 flavours.
Bc = np.eye(3)
Bc = Bc.astype(bool)
Bc8 = Bc.astype(np.uint8)
# 3-channel copy for cv2.watershed, which requires a colour image.
f3 = np.dstack([f,f,f])
f3 = f3.astype(np.uint8)
f3 = f3.copy()
filt = np.array([
    [1,0,-1,0],
    [2,2,3,-2],
    [-1,0,0,1]
    ])
markers32 = markers.astype(np.int32)
def octagon(r):
    """Return a (2r+1) x (2r+1) boolean octagonal structuring element.

    NOTE(review): Python 2 only (`xrange`), consistent with the rest of this
    file; `np.bool` is the deprecated alias removed in NumPy 1.24 and would
    need to become plain `bool` on modern stacks.
    """
    octagon = np.ones((r*2+1, r*2+1), dtype=np.bool)
    lim = r//2
    # Cut the top-left corner diagonally, then mirror vertically and
    # horizontally so all four corners are cut symmetrically.
    for y in xrange(lim):
        octagon[y,:lim-y] = 0
    octagon &= octagon[::-1]
    octagon &= octagon[:,::-1]
    return octagon
# timeit setup string, executed once per timing run: re-imports the libraries
# under test and pulls the fixtures defined above out of this module.
pre ='''
import skimage.filter
import skimage.morphology
import skimage.feature
import numpy as np
import mahotas
import pymorph
import cv2
import timethings
octagon = timethings.octagon
f = timethings.f
f3 = timethings.f3
fbin = timethings.fbin
fbin8 = timethings.fbin8
f64 = f.astype(np.float64)
Bc = timethings.Bc
Bc8 = timethings.Bc8
markers = timethings.markers
markers32 = timethings.markers32
filt = timethings.filt
'''
def t(s):
    # Best-of-three timing of 24 executions of statement `s` (Python 2 `xrange`).
    return min(timeit.timeit(s, setup=pre, number=24) for i in xrange(3))
# Benchmark matrix: (operation name, [mahotas, pymorph, skimage, OpenCV])
# statement strings; None marks an operation a library does not provide.
tests = [
    ('convolve', [
        'mahotas.convolve(f, filt)',
        None,
        None,
        None,
        ]),
    ('erode', [
        'mahotas.erode(fbin, Bc)',
        'pymorph.erode(fbin, Bc)',
        'skimage.morphology.erosion(fbin8, Bc8)',
        'cv2.erode(fbin8, Bc8)',
        ]),
    ('dilate', [
        'mahotas.dilate(fbin, Bc)',
        'pymorph.dilate(fbin, Bc)',
        'skimage.morphology.dilation(fbin8, Bc8)',
        'cv2.dilate(fbin8, Bc8)',
        ]),
    ('open', [
        'mahotas.open(fbin, Bc)',
        'pymorph.open(fbin, Bc)',
        'skimage.morphology.opening(fbin8, Bc8)',
        None,
        ]),
    ('median filter (2)', [
        'mahotas.median_filter(f, octagon(2))',
        None,
        'skimage.filter.median_filter(f, 2)',
        None,
        ]),
    ('median filter (10)', [
        'mahotas.median_filter(f, octagon(10))',
        None,
        'skimage.filter.median_filter(f, 10)',
        None,
        ]),
    ('center mass', [
        'mahotas.center_of_mass(f)',
        None,
        'skimage.measure.regionprops(np.ones(f.shape, np.intc), ["WeightedCentroid"], intensity_image=f)',
        None,
        ]),
    ('sobel', [
        'mahotas.sobel(f, just_filter=True)',
        None,
        'skimage.filter.sobel(f)',
        'cv2.Sobel(f, cv2.CV_32F, 1, 1)',
        ]),
    ('cwatershed', [
        'mahotas.cwatershed(f, markers)',
        'pymorph.cwatershed(f, markers)',
        'skimage.morphology.watershed(f, markers)',
        'cv2.watershed(f3, markers32.copy())',
        ]),
    ('daubechies', [
        'mahotas.daubechies(f, "D4")',
        None,
        None,
        None,
        ]),
    ('haralick', [
        'mahotas.features.haralick(f)',
        None,
        'skimage.feature.greycoprops(skimage.feature.greycomatrix(f, [1], [0]))',
        None,
        ]),
    ]
if __name__ == '__main__':
    # Normalize every timing by a trivial baseline (np.max over the image) and
    # emit one LaTeX table row per operation (Python 2 print statements; the
    # trailing comma suppresses the newline, r'\\' terminates the row).
    base = t('np.max(f)')
    for name,statements in tests:
        print r'{0:<20} &'.format(name),
        for st in statements:
            if st is None:
                result = 'NA'
            else:
                result = '{:.1f}'.format( t(st)/base )
            print '{0:>8} &'.format(result),
        print r'\\'
|
[
"luis@luispedro.org"
] |
luis@luispedro.org
|
32a624033c4fcd4b0dab2f56ea427738fac85532
|
0fd5793e78e39adbfe9dcd733ef5e42390b8cc9a
|
/python3/16_Web_Services/k_Projects/b_twitter/twitter_scrapping.py
|
e22fe2652cae147f89fc3a8955b3336f6f812e4b
|
[] |
no_license
|
udhayprakash/PythonMaterial
|
3ea282ceb4492d94d401e3bc8bad9bf6e9cfa156
|
e72f44e147141ebc9bf9ec126b70a5fcdbfbd076
|
refs/heads/develop
| 2023-07-08T21:07:33.154577
| 2023-07-03T10:53:25
| 2023-07-03T10:53:25
| 73,196,374
| 8
| 5
| null | 2023-05-26T09:59:17
| 2016-11-08T14:55:51
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,215
|
py
|
#!/usr/bin/python
"""
Purpose: Twitter data scrapping
"""
import tweepy
class TwitterLogin:
    """Build an authenticated tweepy API client via OAuth 1a."""

    def __init__(self):
        # Placeholder credentials -- substitute real app keys/tokens to run.
        consumer_key = "xxxxxxxxxxxxxxxxxxxxx"
        consumer_secret = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
        access_token = "00000-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
        access_token_secret = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
        auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
        auth.set_access_token(access_token, access_token_secret)
        # NOTE(review): wait_on_rate_limit_notify was removed in tweepy 4.x;
        # this assumes tweepy 3.x -- confirm the pinned version.
        self.api = tweepy.API(
            auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True
        )

    def credentials_verification(self):
        """Print basic profile details of the authenticated account."""
        result = vars(self.api.verify_credentials())["_json"]
        print(
            f"""Account
User : {result['name']}
Screen Name : {result['screen_name']}
Location : {result['location']}
Profile description : {result['description']}
Account Created at : {result['created_at']}
Display URL : {result['entities']['url']['urls'][0]['display_url']}
"""
        )
class TwitterScrapping(TwitterLogin):
    """Scraping helpers layered on an authenticated TwitterLogin session."""

    def __init__(self):
        # Idiom: cooperative super() call instead of the explicit
        # TwitterLogin.__init__(self) (the file is Python 3 -- it uses f-strings).
        super().__init__()
# twtr = TwitterLogin()
# twtr.credentials_verification()
twrt_scrp = TwitterScrapping()
twrt_scrp.credentials_verification()
# Latest Public Timeline
tweet = twrt_scrp.api.home_timeline()[0]
print(
f"""
tweet.text : {tweet.text}
tweet.contributors : {tweet.contributors}
tweet.created_at : {tweet.created_at}
tweet.lang : {tweet.lang}
tweet.source : {tweet.source}
tweet.source_url : {tweet.source_url}
tweet.truncated : {tweet.truncated}
tweet.retweet_count : {tweet.retweet_count}
tweet.retweeted : {tweet.retweeted}
tweet.retweet : {tweet.retweet}
tweet.retweets : {tweet.retweets}
tweet.possibly_sensitive : {tweet.possibly_sensitive}
tweet.possibly_sensitive_appealable : {tweet.possibly_sensitive_appealable}
"""
)
|
[
"uday3prakash@gmail.com"
] |
uday3prakash@gmail.com
|
6ebf11b3f019ebe0338ba4e09bbe5dcd2b7fbd4f
|
ec4e153f3bf1b335bc1b31b85e6f9db4a6c4faa9
|
/wd_extractor/Document.py
|
6ed52824a553446bd88f07562c5ca97fb6fb3529
|
[
"Apache-2.0",
"CC-BY-3.0"
] |
permissive
|
DuaneNielsen/wd_extractor
|
7936ac29ae97972cfe74973108aaad1efa5054b6
|
128a189bacd0cd2d7f1fa598202b9c4e55f48e2f
|
refs/heads/master
| 2021-01-19T14:13:42.441554
| 2017-09-19T02:16:08
| 2017-09-19T02:16:08
| 100,887,646
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 903
|
py
|
from .Graminator import Graminator
class Document:
    """A single text document belonging to a corpus.

    Tokenization happens eagerly at construction time via the corpus'
    tokenizer; n-gram iteration is delegated to ``Graminator``.
    """

    def __init__(self, corpus, path, grams):
        self.corpus = corpus
        self.grams = grams
        self.graminator = None
        self.path = path
        # Tokenize immediately; the tokenizer presumably calls getText()
        # on this document — TODO confirm against the tokenizer implementation.
        self.tokens = corpus.tokenizer.tokens(self)

    def getText(self):
        """Return the document's raw text, or None when it has no path."""
        if self.path is not None:
            # Bug fix: the original opened the file and never closed it;
            # a context manager guarantees the handle is released.
            with open(self.path, "r") as handle:
                return handle.read()

    def length(self):
        """Return the number of tokens in the document."""
        return len(self.tokens)

    def nGrams(self, gramsize):
        """Return a Graminator producing n-grams of the given size."""
        return Graminator(self, gramsize)

    def hasNext(self, index):
        """True when a token exists after ``index`` (and index+1 > 0)."""
        index += 1
        return (index > 0) and index < len(self.tokens)

    def nextToken(self, index):
        """Return the token following ``index``."""
        return self.tokens[index + 1]

    def hasPrev(self, index):
        """True when a token exists before ``index`` (and index-1 > 0)."""
        index -= 1
        return (index > 0) and index < len(self.tokens)

    def prevToken(self, index):
        """Return the token preceding ``index``."""
        return self.tokens[index-1]
|
[
"duane.nielsen.rocks@gmail.com"
] |
duane.nielsen.rocks@gmail.com
|
12eca4b3e8ae4bf6f27c07a03bbc58a313b36f5f
|
d668209e9951d249020765c011a836f193004c01
|
/tools/pnnx/tests/test_torch_fft_irfft.py
|
8f92dd551a1f5c2f0b5ff9c8894b75b1b122d362
|
[
"BSD-3-Clause",
"Zlib",
"BSD-2-Clause"
] |
permissive
|
Tencent/ncnn
|
d8371746c00439304c279041647362a723330a79
|
14b000d2b739bd0f169a9ccfeb042da06fa0a84a
|
refs/heads/master
| 2023-08-31T14:04:36.635201
| 2023-08-31T04:19:23
| 2023-08-31T04:19:23
| 95,879,426
| 18,818
| 4,491
|
NOASSERTION
| 2023-09-14T15:44:56
| 2017-06-30T10:55:37
|
C++
|
UTF-8
|
Python
| false
| false
| 1,804
|
py
|
# Tencent is pleased to support the open source community by making ncnn available.
#
# Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
    """Applies torch.fft.irfft to each input, one per normalization mode."""

    def __init__(self):
        super(Model, self).__init__()

    def forward(self, x, y, z):
        # Inverse real FFT along the last dim (default) or dim 1 for y,
        # exercising the "backward", "forward" and "ortho" norm modes.
        out_x = torch.fft.irfft(x, norm="backward")
        out_y = torch.fft.irfft(y, dim=1, norm="forward")
        out_z = torch.fft.irfft(z, norm="ortho")
        return out_x, out_y, out_z
def test():
    """Round-trip check: trace the model to TorchScript, convert with the
    pnnx tool, then verify pnnx inference matches the original exactly.

    Returns True when all outputs are bit-identical, False otherwise.
    Side effects: writes test_torch_fft_irfft.pt and runs ../src/pnnx.
    """
    net = Model()
    net.eval()

    torch.manual_seed(0)  # deterministic inputs for reproducible comparison
    x = torch.rand(1, 3, 120, 120)
    y = torch.rand(1, 100, 2, 120)
    z = torch.rand(1, 20, 20)

    a = net(x, y, z)

    # export torchscript
    mod = torch.jit.trace(net, (x, y, z))
    mod.save("test_torch_fft_irfft.pt")

    # torchscript to pnnx
    import os
    os.system("../src/pnnx test_torch_fft_irfft.pt inputshape=[1,3,120,120],[1,100,2,120],[1,20,20]")

    # pnnx inference
    # The converter generates this module next to the .pt file.
    import test_torch_fft_irfft_pnnx
    b = test_torch_fft_irfft_pnnx.test_inference()

    # torch.equal demands exact equality, not allclose.
    for a0, b0 in zip(a, b):
        if not torch.equal(a0, b0):
            return False
    return True

# Exit code 0 on success so CI can consume the result.
if __name__ == "__main__":
    if test():
        exit(0)
    else:
        exit(1)
|
[
"noreply@github.com"
] |
Tencent.noreply@github.com
|
fff29da02d95309713cc9a0f7a86f69832ba5220
|
83a506a501561602ad3b259341225ddfbddab160
|
/GameServer/matchGames/Match_PK_DouDiZhu/redis_instance.py
|
3fe50de16f52f543bb74fc19e6b8dcc7b80828c3
|
[] |
no_license
|
daxingyou/SouYouJi_Game
|
9dc5f02eb28b910efb229653a8d0bffe425a7911
|
7311a994c9aba15b7234331709975ebc37e8453d
|
refs/heads/master
| 2023-03-28T01:36:48.955107
| 2020-04-05T01:24:17
| 2020-04-05T01:24:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 519
|
py
|
# -*- coding:utf-8 -*-
# !/bin/python
"""
Author: Pipo
Date: $Date$
Revision: $Revision$
Description: Redis
"""
import redis
from configs import CONFIGS
redisdb = None
def getInst(dbNum=CONFIGS['redis']['db']):
    """Return a Redis client for database ``dbNum``.

    NOTE(review): a brand-new ConnectionPool is created (and the module
    global ``redisdb`` rebound) on every call — confirm this is intended.
    """
    global redisdb
    cfg = CONFIGS['redis']
    redisdb = redis.ConnectionPool(
        host=cfg['host'],
        port=cfg['port'],
        db=dbNum,
        password=cfg['password']
    )
    return redis.Redis(connection_pool=redisdb)
|
[
"ronnyzh@yeah.net"
] |
ronnyzh@yeah.net
|
95e9f1d292ccffad970294b2b502147429f71198
|
23b5337bf410415b7b150e3ad60cafc1578a0441
|
/07-User-Authentication/01-Flask-Login/myproject/__init__.py
|
54b954d72924a39c7987de9eda326bbc04bd3512
|
[] |
no_license
|
VerdantFox/flask_course
|
b8de13ad312c14229f0c3bc2af70e8609a3b00fb
|
47b167b54bc580734fa69fc1a2d7e724adfb9610
|
refs/heads/master
| 2021-09-10T05:01:47.385859
| 2020-02-24T21:07:05
| 2020-02-24T21:07:05
| 241,973,705
| 0
| 0
| null | 2021-09-08T01:40:59
| 2020-02-20T19:40:42
|
Python
|
UTF-8
|
Python
| false
| false
| 761
|
py
|
import os
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_login import LoginManager
# Create a login manager object
login_manager = LoginManager()

app = Flask(__name__)
# Often people will also separate these into a separate config.py file
# NOTE(review): hard-coded secret key — fine for a tutorial, but real
# deployments must load this from the environment.
app.config["SECRET_KEY"] = "mysecretkey"
# SQLite database file lives next to this module.
basedir = os.path.abspath(os.path.dirname(__file__))
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///" + os.path.join(
    basedir, "data.sqlite"
)
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False

db = SQLAlchemy(app)
Migrate(app, db)

# We can now pass in our app to the login manager
login_manager.init_app(app)

# Tell users what view to go to when they need to login.
login_manager.login_view = "login"
|
[
"verdantfoxx@gmail.com"
] |
verdantfoxx@gmail.com
|
51f55bc16f6ed44f56ff1aebecc74e8ef660f3e9
|
222b17dacb95640499ebd484697ead32e83b9ac1
|
/find_defining_class.py
|
3bf7302beb99684035cd35f6b235fee80a90520b
|
[] |
no_license
|
cicekozkan/python-examples
|
08330ef0fb1678cace17716ac2f490a3c5b95dd2
|
01b0e654c884946f8353995333a6946062c9c158
|
refs/heads/master
| 2021-01-14T14:06:37.585963
| 2014-12-26T07:55:13
| 2014-12-26T07:55:13
| 25,510,316
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 344
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 17 11:46:43 2014
@author: ocicek
"""
def find_defining_class(obj, meth_name):
    """Return the class in ``type(obj)``'s MRO that defines ``meth_name``.

    Walks the method resolution order and picks the first class whose own
    ``__dict__`` holds the attribute; returns None when no class does.
    """
    return next(
        (klass for klass in type(obj).mro() if meth_name in klass.__dict__),
        None,
    )
|
[
"cicekozkan@gmail.com"
] |
cicekozkan@gmail.com
|
663f935d7eb0b3d622d212ba615d6a7387719c88
|
c4cb90afb658a822c4ab867eec979227c0a25a6d
|
/testdemo/settings.py
|
752c0a3676d4faf49f9a97caa9ee3abc5b89683d
|
[] |
no_license
|
Contraz/demosys-py-test
|
81afb3dd801c0deb6046ddb0e7836de61182a36f
|
2aa760cb94ea34e3fb610ca8c43f1549ba9b53de
|
refs/heads/master
| 2021-01-19T16:58:33.608630
| 2018-07-13T07:59:34
| 2018-07-13T07:59:34
| 88,294,443
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,823
|
py
|
import os
# demosys-py project settings: paths, window/GL config, timers, effect list
# and resource finders.
PROJECT_DIR = os.path.dirname(os.path.abspath(__file__))

DEBUG = False
SCREENSHOT_PATH = os.path.join(PROJECT_DIR, 'screenshots')

# Profile: any, core, compat
OPENGL = {
    "version": (3, 3),
}

WINDOW = {
    "size": (1280, 720),
    "vsync": True,
    "resizable": True,
    "fullscreen": False,
    "title": "demosys-py",
    "cursor": True,
}

# MUSIC = os.path.join(PROJECT_DIR, 'resources/music/tg2035.mp3')

# Active timer implementation; the commented alternatives sync to GNU
# Rocket and/or music playback.
TIMER = 'demosys.timers.Timer'
# TIMER = 'demosys.timers.RocketTimer'
# TIMER = 'demosys.timers.RocketMusicTimer'
# TIMER = 'demosys.timers.MusicTimer'

ROCKET = {
    'mode': 'project',
    # 'mode': 'editor',
    'rps': 60,
    'project': os.path.join(PROJECT_DIR, 'resources', 'cube.xml'),
    'files': os.path.join(PROJECT_DIR, 'resources', 'tracks'),
}

# What effects to load
EFFECTS = (
    # 'testdemo.plain',
    # 'testdemo.bouncingcubes',
    # 'testdemo.bouncingcubes_instanced',
    # 'testdemo.cube',
    # 'testdemo.deferred',
    # 'demosys.deferred',
    'testdemo.feedback',
    # 'testdemo.multilayer',
    # 'testdemo.rockettest',
)

SHADER_DIRS = (
    os.path.join(PROJECT_DIR, 'resources/shaders'),
)

SHADER_FINDERS = (
    'demosys.core.shaderfiles.finders.FileSystemFinder',
    'demosys.core.shaderfiles.finders.EffectDirectoriesFinder',
)

# Hardcoded paths to shader dirs
# NOTE(review): 'resource/textures' (singular) here vs 'resources/...'
# everywhere else — looks like a typo; confirm the real directory name.
TEXTURE_DIRS = (
    os.path.join(PROJECT_DIR, 'resource/textures'),
)

# Finder classes
TEXTURE_FINDERS = (
    'demosys.core.texturefiles.finders.FileSystemFinder',
    'demosys.core.texturefiles.finders.EffectDirectoriesFinder'
)

# Tell demosys how to find shaders split into multiple files
SHADERS = {
    'vertex_shader_suffix': ('vert', '_vs.glsl', '.glslv'),
    'fragment_shader_suffix': ('frag', '_fs.glsl', '.glslf'),
    'geometry_shader_suffix': ('geom', '_gs.glsl', '.glslg'),
}
|
[
"eforselv@gmail.com"
] |
eforselv@gmail.com
|
f1ef29d00b9e612458bdb8429ac6cc2833dcfeb1
|
cd58faaffc84a4b1194fa55206ecce3458289edb
|
/setup.py
|
00f05e0c3c24ac0059253c0b709c8ccd9fd0b61a
|
[
"MIT"
] |
permissive
|
danieleteti/revelation
|
89327833d896c7350d41a7983d4781d980134a79
|
de4f8221e6c78aca174600dd333b0f9a5f62baa2
|
refs/heads/master
| 2020-03-21T08:10:47.420032
| 2018-07-17T18:05:17
| 2018-07-17T18:05:17
| 138,326,204
| 0
| 0
|
MIT
| 2018-06-22T16:43:33
| 2018-06-22T16:43:33
| null |
UTF-8
|
Python
| false
| false
| 2,222
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""revelation setup file"""
import os
import re
from setuptools import find_packages, setup
PACKAGE = "revelation"

# Runtime dependencies are fully pinned for reproducible installs.
REQUIREMENTS = [
    "Jinja2==2.10",
    "Werkzeug==0.14.1",
    "click==6.7",
    "gevent-websocket==0.10.1",
    "gevent==1.3.4",
    "watchdog==0.8.3",
]

TEST_REQUIREMENTS = [
    "coverage==4.5.1",
    "coveralls==1.3.0",
    "flake8==3.5.0",
    "mock",
    "nose==1.3.7",
]

with open("README.md", "r") as f:
    README = f.read()

# Extract version/author/email from the package __init__.py so they are
# defined in exactly one place.
with open(os.path.join(PACKAGE, "__init__.py")) as init_file:
    INIT = init_file.read()

VERSION = re.search(
    "^__version__ = ['\"]([^'\"]+)['\"]", INIT, re.MULTILINE
).group(1)

AUTHOR = re.search(
    "^__author__ = ['\"]([^'\"]+)['\"]", INIT, re.MULTILINE
).group(1)

EMAIL = re.search(
    "^__email__ = ['\"]([^'\"]+)['\"]", INIT, re.MULTILINE
).group(1)

setup(
    name=PACKAGE,
    version=VERSION,
    description="Make awesome reveal.js presentations with revelation",
    long_description=README,
    long_description_content_type="text/markdown",
    author=AUTHOR,
    author_email=EMAIL,
    url="https://github.com/humrochagf/revelation",
    license="MIT",
    packages=find_packages(),
    package_data={PACKAGE: ["templates/presentation.html"]},
    zip_safe=False,
    install_requires=REQUIREMENTS,
    entry_points=dict(console_scripts=["revelation=revelation.cli:cli"]),
    platforms="any",
    keywords="presentation slides reveal.js markdown",
    classifiers=[
        "Environment :: Console",
        "Environment :: Web Environment",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Natural Language :: English",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Topic :: Multimedia :: Graphics :: Presentation",
        "Topic :: Text Processing :: Markup :: HTML",
    ],
    test_suite="tests",
    tests_require=TEST_REQUIREMENTS,
    extras_require={"test": TEST_REQUIREMENTS},
)
|
[
"humrochagf@gmail.com"
] |
humrochagf@gmail.com
|
5fb0d6de6e07ff397e5a483f3a634518532a6424
|
427cb811a465677542172b59f5e5f102e3cafb1a
|
/python/print/printContent.py
|
6a213db972abd85fe761285d5c7b5bbb5ae57cdd
|
[] |
no_license
|
IzaakWN/CodeSnippets
|
1ecc8cc97f18f77a2fbe980f322242c04dacfb89
|
07ad94d9126ea72c1a8ee5b7b2af176c064c8854
|
refs/heads/master
| 2023-07-26T21:57:10.660979
| 2023-07-20T20:35:59
| 2023-07-20T20:35:59
| 116,404,943
| 18
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,361
|
py
|
# https://docs.python.org/2/library/optparse.html
# http://www.macworld.com/article/1132219/software-utilities/termfoldercomp.html
# https://automatetheboringstuff.com/chapter7/
# TODO: function to replace patterns https://docs.python.org/2/library/re.html
# TODO: add month and year to fileName
# TODO .bundle
import os, sys
from argparse import ArgumentParser
import re
import time
# Command-line setup (Python 2 script — note the print statement below).
argv = sys.argv  # NOTE(review): assigned but never used
parser = ArgumentParser(description="Make textfile with hierarchy of subdir for a given dir")
parser.add_argument( "file",
                     type=str, action='store',
                     metavar="DIRECTORY", help="Input directory" )
parser.add_argument( "-o", "--output", dest="fileName",
                     default=None, action='store',
                     metavar="FILE_NAME", help="file name to print subdirs hierarchy" )
parser.add_argument( "-t", "--extensions", dest="extensions",
                     nargs='+', default=None, action='store',
                     metavar="EXT", help="only specified extensions" )
parser.add_argument( "-d","--depth", dest="maxDepth",
                     type=int, default=None, action='store',
                     metavar="MAX_DEPTH", help="set maximum subdir depth" )
parser.add_argument( "-e", "--excludeFiles", dest="excludeFiles",
                     default=False, action='store_true',
                     help="exclude files" )
parser.add_argument( "-a", "--all", dest="showAll",
                     default=False, action='store_true',
                     help="show hidden files and directories" )
args = parser.parse_args()
# Unpack into module globals used by listSubDirs/main below.
fileName = args.fileName
extensions = args.extensions
maxDepth = args.maxDepth
includeFiles = not args.excludeFiles
showAll = args.showAll
print args.file
tab = " "  # indentation unit per hierarchy level
# NOTE(review): dead code — this def is immediately shadowed by the second
# ``replacePattern2`` defined right below it.  It also references the
# misspelled name ``sting`` (NameError if the matching branch ever runs)
# and falls off the end (returning None) when every part is found.
def replacePattern2(string,pattern,replaceString):
    # Split the glob pattern on '*' and try to locate each literal part in order.
    parts = pattern.split("*")
    a = 0  # search offset into ``string``
    for part in parts:
        if part in string[a:]:
            a = sting[a:].index(part)  # BUG: ``sting`` is a typo for ``string``
        else:
            return string  # a part is missing -> return the input unchanged
def replacePattern2(string, patterns, replaceString=""):
    """Replace every glob-style pattern occurrence in ``string``.

    Each pattern may contain ``*`` wildcards and literal brackets, e.g.
    ``"[720*BluRay*]"``.  Every match is substituted by ``replaceString``
    (empty by default, i.e. deleted).  Returns the transformed string.
    """
    # pattern = re.compile (r'\[720.*?BluRay.*?YIFY\]')
    # pattern.findall("lol (2010) [720p foo BluRay YIFY bar]")
    for pattern in patterns:
        # Translate the glob into a regex: escape brackets, '*' -> non-greedy '.*?'.
        pattern = pattern.replace("[", r"\[").replace("]", r"\]").replace("*", ".*?")
        comp = re.compile(pattern)
        # Bug fix: the original called the bare name findall() (NameError);
        # the compiled pattern's method was clearly intended.
        for match in comp.findall(string):
            string = string.replace(match, replaceString, 1)
    # Bug fix: the original built the result and then discarded it.
    return string
def listSubDirs(dir,extensions=[],indent="",depth=0):
    """Recursively collect an indented listing of ``dir``'s contents.

    Relies on the module globals ``showAll``, ``maxDepth``, ``includeFiles``
    and ``tab`` set from the command line above.  Returns a flat list of
    strings, one per entry, indented by nesting depth.
    NOTE(review): ``list`` shadows the builtin; the mutable default
    ``extensions=[]`` is never mutated here, so it is harmless.
    """
    list = os.listdir(dir)
    hierarchy = [ ]
    for i in list:
        # Skip hidden entries unless -a/--all was given.
        if i[0] != "." or showAll:
            subdir = dir+"/"+i
            # macOS .app bundles are directories but are treated as files.
            if os.path.isdir(subdir) and not i[-4:] == ".app":
                hierarchy += [ indent+i ]
                if (maxDepth == None or depth < maxDepth):
                    hierarchy += listSubDirs( subdir,
                                              extensions=extensions,
                                              indent=tab+indent,
                                              depth=depth+1 )
            elif includeFiles or i[-4:] == ".app":
                if extensions:
                    # Keep only files whose suffix matches one of the
                    # requested extensions.
                    for ext in extensions:
                        if ext == i[-len(ext):]:
                            hierarchy += [ indent+i ]
                            break
                else:
                    hierarchy += [ indent+i ]
    return hierarchy
def main(dir):
    """Print the hierarchy of ``dir`` and write it to a text file.

    The output file name defaults to "<dirname> hierarchy <d-m-Y>.txt".
    Python 2 script: uses print statements throughout.
    """
    global fileName
    path = "/"
    # Separate the parent path from the directory name (strip trailing '/').
    if "/" in dir:
        if dir[-1] == "/":
            dir = dir[:-1]
        path = dir[:dir.rfind("/")+1]
    hierarchy = listSubDirs(dir,extensions=extensions)
    for i in hierarchy:
        print i
    if not fileName:
        # Default file name includes the current date.
        t = time.struct_time(time.localtime())
        fileName = "%s hierarchy %i-%i-%i.txt" % (dir.replace(path,""), t.tm_mday, t.tm_mon, t.tm_year)
    file = open(fileName,'write')  # NOTE(review): mode 'write' — open() only honours its first char 'w'
    file.write(dir+"\n\n")
    for i in hierarchy:
        file.write(i+"\n")
    print ">>> %s written" % fileName
    file.close()

if __name__ == '__main__':
    if len(sys.argv) > 1:
        dir = str(sys.argv[1])
        if os.path.isdir(dir):
            main(dir)
        else:
            if not os.path.isdir(dir):
                print ">>> ERROR: argument is not a directory: %s" % dir
    else:
        print ">>> ERROR: Needs an arguments"
    print ">>> done"
|
[
"iwn_@hotmail.com"
] |
iwn_@hotmail.com
|
376d6b0ccb6509c96d3c340f24977524379fc444
|
45de3aa97525713e3a452c18dcabe61ac9cf0877
|
/src/secondaires/diligence/fonctions/diligences.py
|
0c9dd2f3eb7a0ab9f84de366ec3c7a1105448876
|
[
"BSD-3-Clause"
] |
permissive
|
stormi/tsunami
|
95a6da188eadea3620c70f7028f32806ee2ec0d1
|
bdc853229834b52b2ee8ed54a3161a1a3133d926
|
refs/heads/master
| 2020-12-26T04:27:13.578652
| 2015-11-17T21:32:38
| 2015-11-17T21:32:38
| 25,606,146
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,016
|
py
|
# -*-coding:Utf-8 -*
# Copyright (c) 2014 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant la fonction diligences."""
from primaires.scripting.fonction import Fonction
from primaires.scripting.instruction import ErreurExecution
class ClasseFonction(Fonction):

    """Return the stagecoaches (their entry room)."""

    @classmethod
    def init_types(cls):
        # Register the overload taking no parameters.
        cls.ajouter_types(cls.toutes_diligences)

    @staticmethod
    def toutes_diligences():
        """Return all the stagecoaches of the universe.

        This function returns every stagecoach as a list.  The list
        contains rooms.  The scripting functions and actions that
        manipulate stagecoaches expect a room as parameter: the rooms
        returned here are the entry rooms (those with mnemonic "1").
        A stagecoach normally has a "bas" (down) exit leading to the
        room that gives access to it.

        This function takes no parameter.

        Usage example:

          diligences = diligences()
          pour chaque entree dans diligences:
              exterieur = destination(entree, "bas")
              # exterieur contient la salle à l'extérieur de la diligence
          fait

        """
        zones = importeur.diligence.zones
        entrees = []
        for zone in zones:
            # Entry room of a stagecoach zone is mnemonic "1" ("<zone>:1").
            salle = importeur.salle.salles.get("{}:1".format(zone.cle))
            if salle:
                entrees.append(salle)

        return entrees
|
[
"vincent.legoff.srs@gmail.com"
] |
vincent.legoff.srs@gmail.com
|
8fc69ea6d952ef1e4cfc879a40a170fe9c897d6c
|
d9fd9c6329461235f140393f1e934362d0f645df
|
/Unidad 2/Módulo 6/Sección 4/eje_09.py
|
e3cf1510314a26331adc0b550e3c13291c3325ad
|
[
"MIT"
] |
permissive
|
angelxehg/utzac-python
|
e6b5ee988d1d76c549ab0fa49717eb042fa7d91f
|
fb88bcc661518bb35c08a102a67c20d0659f71db
|
refs/heads/main
| 2022-12-02T11:16:27.134741
| 2020-08-14T19:38:33
| 2020-08-14T19:38:33
| 265,944,612
| 0
| 0
|
MIT
| 2020-08-07T21:23:53
| 2020-05-21T20:25:24
|
Python
|
UTF-8
|
Python
| false
| false
| 375
|
py
|
class MiClase:
pass
obj = MiClase()
obj.a = 1
obj.b = 2
obj.i = 3
obj.ireal = 3.5
obj.entero = 4
obj.z = 5
def incIntsI(obj):
    """Increment every int-valued instance attribute whose name starts with 'i'."""
    # Snapshot the attribute dict so in-place updates are safe while looping.
    for attr_name, attr_value in list(vars(obj).items()):
        if attr_name.startswith('i') and isinstance(attr_value, int):
            setattr(obj, attr_name, attr_value + 1)
print(obj.__dict__)
incIntsI(obj)
print(obj.__dict__)
|
[
"50889225+angelxehg@users.noreply.github.com"
] |
50889225+angelxehg@users.noreply.github.com
|
bb936e36f73b3022e5fc4ff938b2e48d6d89e8c1
|
4273f162abb12ef1939271c2aabee9547ac6afee
|
/studio_usd_pipe/test/ver.py
|
e3d449cb801732082a041c7c123caf699f61c94a
|
[] |
no_license
|
xiyuhao/subins_tutorials
|
2717c47aac0adde099432e5dfd231606bf45a266
|
acbe4fe16483397e9b0f8e240ca23bdca652b92d
|
refs/heads/master
| 2023-07-28T13:42:41.445399
| 2021-09-12T11:02:37
| 2021-09-12T11:02:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,366
|
py
|
# Test data describing one application registration: each list value is an
# (ENV_VAR_NAME, value) pair; scalar entries ("order", "bash") are skipped.
input_data = {
    "exe": [
        "KONSOLE_EXE",
        "/venture/source_code/subins_tutorials/studio_usd_pipe/bin/build-in/konsole/main.sh"
    ],
    "name": [
        "APPLICATION_NAME",
        "konsole2.10.5"
    ],
    "version": [
        "KONSOLE_VERSION",
        "konsole2.10.5"
    ],
    "path": [
        "KONSOLE_PATH",
        "/venture/source_code/subins_tutorials/studio_usd_pipe/bin/build-in/konsole"
    ],
    "order": 0,
    "bash": "/venture/source_code/subins_tutorials/studio_usd_pipe/bin/build-in/konsole/main.sh",
    "icon": [
        "KONSOLE_ICON",
        "/venture/source_code/subins_tutorials/studio_usd_pipe/resource/icons/konsole.png"
    ]
}

import os
import json

# Pre-seed one variable so the merge branch below gets exercised.
os.environ['KONSOLE_EXE'] = "/venture/source_code/subins_tutorials/studio_usd_pipe/bin/build-in/konsole/main.sh:subin"

for each in input_data:
    if not isinstance(input_data[each], list):
        continue  # skip scalar entries such as "order" and "bash"
    env_name = input_data[each][0]
    env_value = input_data[each][1]
    # Normalise the value to a single ':'-joined string.
    if isinstance(env_value, list):
        env_value = ':'.join(env_value)
    else:
        env_value = str(env_value)
    if os.getenv(env_name):
        # Merge with the pre-existing value, de-duplicating entries
        # (set() makes the ordering non-deterministic).
        envrons = os.getenv(env_name).split(':')
        envrons.append(env_value)
        envrons = list(set(envrons))
        # Bug fix: the original line was a syntax error
        # (``env_value = os.environ[':'.join(envrons))``); presumably the
        # merged joined value was intended — TODO confirm against the real
        # pipeline code this test mirrors.
        env_value = ':'.join(envrons)
    else:
        env_value = str(env_value)
|
[
"subing85@gmail.com"
] |
subing85@gmail.com
|
4229eb3d57d5f03b46b944d86271693266461296
|
e73a2ff9458effe038ebabfe9db6cdaf0c5bc473
|
/order_food_online_project/order_food_online/urls.py
|
c5771206f24064f500d0c904aa8232d203cf5dcb
|
[
"MIT"
] |
permissive
|
MaksNech/django_order_food_ingredients
|
fcad5668b92b90776715d39e3f241577cf4364fa
|
3578e36570ce99b25136942320fbcd7df956d435
|
refs/heads/master
| 2020-04-20T21:20:38.496108
| 2019-04-06T15:17:29
| 2019-04-06T15:17:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,333
|
py
|
"""order_food_online URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
from django.conf import settings
from django.shortcuts import redirect
from django.conf.urls.static import static
urlpatterns = [
path('api/v1/', include('order_food_online.api_urls')),
path('', lambda request: redirect('foods/', permanent=True)),
path('i18n/', include('django.conf.urls.i18n')),
path('admin/', admin.site.urls),
path('foods/', include('foods.urls')),
path('notes/', include('notes.urls')),
path('authentication/', include('authentication.urls')),
path('accounts/', include('django.contrib.auth.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"nechypurenko.shag@gmail.com"
] |
nechypurenko.shag@gmail.com
|
820c2bd2006c8b43d126a6d5226df4dd461d5814
|
b6ef959b538e4bffec92998a553175248bd72a77
|
/06-Machine_Learning/brain.py
|
1376cf1f411443947b708acf7499cd6bdf52de49
|
[
"MIT"
] |
permissive
|
suzynakayama/udemy-python-dev
|
9e384e3683a300f07c14d2a5862003038a4b169c
|
fbb35d00f94296da1281e6042a4efe506f79dddb
|
refs/heads/main
| 2023-02-10T11:50:47.650049
| 2021-01-07T22:46:52
| 2021-01-07T22:46:52
| 307,135,927
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 530
|
py
|
import os
from imageai.Classification import ImageClassification
# get current directory
execution_path = os.getcwd()

# Classify a local image with a pretrained MobileNetV2; both
# "mobilenet_v2.h5" and "giraffe.jpg" must exist in the working directory.
prediction = ImageClassification()
prediction.setModelTypeAsMobileNetV2()
prediction.setModelPath(os.path.join(execution_path, "mobilenet_v2.h5"))
prediction.loadModel()

# Top-5 labels with their confidence percentages.
predictions, probabilities = prediction.classifyImage(os.path.join(execution_path, "giraffe.jpg"), result_count=5 )
for eachPrediction, eachProbability in zip(predictions, probabilities):
    print(eachPrediction , " : " , eachProbability)
|
[
"suzy.nakayama@gmail.com"
] |
suzy.nakayama@gmail.com
|
a433ae84fb074b61840e19b067915bc4fc1b848c
|
490ffe1023a601760ae7288e86723f0c6e366bba
|
/kolla-docker/patching/zun_compute_api/provideraccount.py
|
a338bd5fd861592d8f7f624b5913d613b42fd69c
|
[] |
no_license
|
bopopescu/Cloud-User-Management
|
89696a5ea5d2f95191327fbeab6c3e400bbfb2b8
|
390988bf4915a276c7bf8d96b62c3051c17d9e6e
|
refs/heads/master
| 2022-11-19T10:09:36.662906
| 2018-11-07T20:28:31
| 2018-11-07T20:28:31
| 281,786,345
| 0
| 0
| null | 2020-07-22T21:26:07
| 2020-07-22T21:26:06
| null |
UTF-8
|
Python
| false
| false
| 1,679
|
py
|
def provideraccount_update(self, context, container, *args):
    """Update a provider account, either in-process or over RPC.

    NOTE(review): ``direct_action`` is not defined in this patch file —
    presumably a module-level flag in the patched zun module; confirm.
    """
    if direct_action:
        return self.manager.provideraccount_update(context, container, *args)
    else:
        return self.rpcapi.provideraccount_update(context, container, *args)
def provideraccount_show(self, context, container, *args):
    """Show a provider account via the manager or the RPC API.

    NOTE(review): ``*args`` is accepted but not forwarded, unlike the
    sibling update/delete calls — confirm this asymmetry is intended.
    """
    if direct_action:
        return self.manager.provideraccount_show(context, container)
    else:
        return self.rpcapi.provideraccount_show(context, container)
def provideraccount_create(self, context, new_provideraccount, extra_spec,
                           requested_networks):
    """Create a provider account (scheduling is currently stubbed out).

    NOTE(review): the except branch silently returns None — the original
    error-status bookkeeping is commented out, so failures are swallowed.
    """
    host_state = None
    try:
        host_state = {} # self._schedule_container(context, new_provideraccount, extra_spec)
    except Exception as exc:
        # new_provideraccount.status = consts.ERROR
        # new_provideraccount.status_reason = str(exc)
        # new_provideraccount.save(context)
        return
    # Argument order differs between the direct and RPC paths; kept as-is.
    if direct_action:
        self.manager.provideraccount_create(context, "", requested_networks, new_provideraccount)
    else:
        self.rpcapi.provideraccount_create(context, "", new_provideraccount, "", requested_networks)
    # self.rpcapi.provideraccount_create(context, host_state['host'],
    #                            new_provideraccount, host_state['limits'],
    #                            requested_networks)
def provideraccount_delete(self, context, container, *args):
    """Delete a provider account; always forced (True) via the manager.

    The RPC path is disabled; ``*args`` is accepted but ignored.
    """
    return self.manager.provideraccount_delete(context, container, True)
    # return self.rpcapi.provideraccount_delete(context, container, *args)
|
[
"Mr.Qinlichao@hotmail.com"
] |
Mr.Qinlichao@hotmail.com
|
87dcdc1f187f0619115ef51295c60468005bd5f3
|
dcce56815dca2b18039e392053376636505ce672
|
/dumpscripts/itertools_filterfalse.py
|
4db9836daa58ad384b41f161c27d4886ab93f22c
|
[] |
no_license
|
robertopauletto/PyMOTW-it_3.0
|
28ff05d8aeccd61ade7d4107a971d9d2576fb579
|
c725df4a2aa2e799a969e90c64898f08b7eaad7d
|
refs/heads/master
| 2021-01-20T18:51:30.512327
| 2020-01-09T19:30:14
| 2020-01-09T19:30:14
| 63,536,756
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 208
|
py
|
# itertools_filterfalse.py
from itertools import *
def check_item(x):
    """Logged predicate: True exactly when x is below 1."""
    is_small = x < 1
    print('Verifica:', x)
    return is_small


# filterfalse keeps only the elements for which the predicate is False.
for kept in filterfalse(check_item, [-1, 0, 1, 2, -2]):
    print('Trattengo:', kept)
|
[
"roberto.pauletto@gmail.com"
] |
roberto.pauletto@gmail.com
|
747ca14a18296d4beabd473f554d3da345152774
|
847273de4b1d814fab8b19dc651c651c2d342ede
|
/.history/sok2_20180606104430.py
|
e3d724e0922456fec8afc2db0669485e5ed3545c
|
[] |
no_license
|
Los4U/sudoku_in_python
|
0ba55850afcffeac4170321651620f3c89448b45
|
7d470604962a43da3fc3e5edce6f718076197d32
|
refs/heads/master
| 2020-03-22T08:10:13.939424
| 2018-07-04T17:21:13
| 2018-07-04T17:21:13
| 139,749,483
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,247
|
py
|
# Sudoku board: nine rows, 0 marks an empty cell.
row1 = [0,0,0,0,0,0,0,0,0]
row2 = [0,0,0,5,0,6,0,0,0]
row3 = [0,0,1,0,0,0,0,3,0]
row4 = [0,9,5,0,0,0,2,0,0]
row5 = [0,0,0,0,0,1,6,0,7]
row6 = [1,0,6,0,0,9,0,0,5]
row7 = [7,0,0,8,0,3,9,0,0]
row8 = [0,3,8,9,0,0,0,2,0]
row9 = [0,5,0,0,2,0,7,0,0]
# Initial board dump, blank line between each 3-row band.
print(row1)
print(row2)
print(row3)
print("")
print(row4)
print(row5)
print(row6)
print("")
print(row7)
print(row8)
print(row9)

# Interactive loop: the user types "x y z" (row, column, value) as one
# string; only row 1 is handled so far.
while True:
    x = input("Wprowadz x y z:")
    try:
        if int(x[0])==1:
            # NOTE(review): stores the character x[4] (a str) into an int
            # grid, and assumes the exact "d d d" input layout — confirm.
            row1[int(x[2])-1]=x[4]
            print("ok")
    except ValueError: # catch non-digit input (ValueError) and re-prompt
        print("Wprowadz cyfrę!")
        continue
    # Print row 1 in 3-cell groups separated by " - ".
    print(row1[0],row1[1],row1[2], sep=' ', end=" - ")
    print(row1[3],row1[4],row1[5], sep=' ', end=" - ")
    print(row1[6],row1[7],row1[8], sep=' ')
    # NOTE(review): duplicated block — row 1 is printed twice per turn.
    print(row1[0],row1[1],row1[2], sep=' ', end=" - ")
    print(row1[3],row1[4],row1[5], sep=' ', end=" - ")
    print(row1[6],row1[7],row1[8], sep=' ')
    #print(str(*r11, sep='') + "-" + str(r12) + " - " + str(r13))
    print(row2)
    print(row3)
    print(""),
    print(row4)
    print(row5)
    print(row6)
    print("")
    print(row7)
    print(row8)
    print(row9)
    #print(new)
    #rds.insert(index, "is")
[
"inz.kamil.wos@gmail.com"
] |
inz.kamil.wos@gmail.com
|
92f3ee7e26c3ee1406bd8042cee27fc0d7f8f4c2
|
d115cf7a1b374d857f6b094d4b4ccd8e9b1ac189
|
/tags/pygccxml_dev_1.0.0/unittests/plain_c_tester.py
|
c26b2581fbaca21e9f350c66801aeb71c9acd90f
|
[
"BSL-1.0"
] |
permissive
|
gatoatigrado/pyplusplusclone
|
30af9065fb6ac3dcce527c79ed5151aade6a742f
|
a64dc9aeeb718b2f30bd6a5ff8dcd8bfb1cd2ede
|
refs/heads/master
| 2016-09-05T23:32:08.595261
| 2010-05-16T10:53:45
| 2010-05-16T10:53:45
| 700,369
| 4
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,430
|
py
|
# Copyright 2004-2008 Roman Yakovenko.
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import unittest
import autoconfig
import parser_test_case
from pygccxml import utils
from pygccxml import parser
from pygccxml import declarations
class tester_t( parser_test_case.parser_test_case_t ):
    """Parses plain_c.c with pygccxml and checks the free functions it
    declares are discoverable (Python 2 test case)."""

    def __init__(self, *args ):
        parser_test_case.parser_test_case_t.__init__( self, *args )
        self.header = 'plain_c.c'
        self.global_ns = None

    def setUp(self):
        # Parse lazily, only once per test-case instance.
        if not self.global_ns:
            decls = parser.parse( [self.header], self.config )
            self.global_ns = declarations.get_global_namespace( decls )
            self.global_ns.init_optimizer()

    def test( self ):
        # free_fun raises if the function is missing, so bare calls are
        # themselves the assertions here.
        self.global_ns.free_fun( 'hello_sum' )
        self.global_ns.free_fun( 'hello_print' )
        declarations.print_declarations( self.global_ns )
        f = self.global_ns.free_fun( 'do_smth' )
        for arg in f.arguments:
            print arg.type.decl_string
def create_suite():
    """Build a TestSuite holding every test method of tester_t."""
    return unittest.TestSuite([unittest.makeSuite(tester_t)])
def run_suite():
    """Execute the suite with a verbose text runner."""
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(create_suite())
if __name__ == "__main__":
run_suite()
|
[
"roman_yakovenko@dc5859f9-2512-0410-ae5c-dd123cda1f76"
] |
roman_yakovenko@dc5859f9-2512-0410-ae5c-dd123cda1f76
|
1c1722d15f2ee8dde90347013662ca30cd87c6a3
|
0269037acc7785a58f8786c60be8ccea8ef3f6f3
|
/indico/modules/attachments/models/folders_test.py
|
71309414a40429ae60741e7457815421438a6ce8
|
[
"MIT"
] |
permissive
|
bebusl/cbnu_indico
|
1ffa7042a1f706da953214b39827cbdbb1387cce
|
60b37c2bf54cd7f17092b2a9ad21311762729601
|
refs/heads/master
| 2023-01-18T22:22:09.655751
| 2020-12-02T09:04:06
| 2020-12-02T09:04:06
| 281,068,896
| 0
| 0
|
MIT
| 2020-07-20T09:09:44
| 2020-07-20T09:09:43
| null |
UTF-8
|
Python
| false
| false
| 1,890
|
py
|
# This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
from indico.modules.attachments import AttachmentFolder
def test_update_principal(dummy_user, dummy_event):
    """update_principal only creates/removes ACL entries on real permission changes."""
    folder = AttachmentFolder(object=dummy_event, is_default=True)
    assert not folder.acl_entries
    # not changing anything -> shouldn't be added to acl
    entry = folder.update_principal(dummy_user)
    assert entry is None
    assert not folder.acl_entries
    # adding user with read access -> new acl entry since the user isn't in there yet
    entry = initial_entry = folder.update_principal(dummy_user, read_access=True)
    assert folder.acl_entries == {entry}
    # not changing anything on existing principal -> shouldn't modify acl
    entry = folder.update_principal(dummy_user)
    assert entry is initial_entry
    assert folder.acl_entries == {entry}
    # granting permission which is already present -> shouldn't modify acl
    entry = folder.update_principal(dummy_user, read_access=True)
    assert entry is initial_entry
    assert folder.acl_entries == {entry}
    # removing read access -> acl entry is removed
    entry = folder.update_principal(dummy_user, read_access=False)
    assert entry is None
    assert not folder.acl_entries
def test_remove_principal(dummy_user, dummy_event):
    """remove_principal drops the user's ACL entry and is a no-op thereafter."""
    fld = AttachmentFolder(object=dummy_event, is_default=True)
    assert not fld.acl_entries
    granted = fld.update_principal(dummy_user, read_access=True)
    assert fld.acl_entries == {granted}
    fld.remove_principal(dummy_user)
    assert not fld.acl_entries
    # A second removal must stay a silent no-op.
    fld.remove_principal(dummy_user)
    assert not fld.acl_entries
|
[
"adrian.moennich@cern.ch"
] |
adrian.moennich@cern.ch
|
977da3579e8f87f1655e64f2de8938f2c1adc395
|
1207d50126d4d59966573927c5eadd94db6aeb59
|
/svggen/library/Rectangle.py
|
cb7e78caee0de5d274f55684375712ff71248bc0
|
[] |
no_license
|
christianwarloe/robotBuilder
|
aee03c189972f1d305c6e13d106b362b5d26d187
|
3f8fbc267ac7b9bbae534d1208278541a7b5eaa5
|
refs/heads/master
| 2021-06-13T02:42:24.834816
| 2017-04-07T01:01:52
| 2017-04-07T01:01:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 987
|
py
|
from svggen.api.FoldedComponent import FoldedComponent
from svggen.api.composables.graph.Face import Rectangle as Rect
from svggen.api.composables.GraphComposable import Graph
from svggen.api.ports.EdgePort import EdgePort
from svggen.api.ports.FacePort import FacePort
class Rectangle(FoldedComponent):
    """A single rectangular face exposing one face port and four edge ports."""
    # Default parameter values used by the framework's self-test harness.
    _test_params = {
        'l': 100,
        'w': 400,
    }
    def define(self, **kwargs):
        # Declare the length/width parameters; both must be positive.
        FoldedComponent.define(self, **kwargs)
        self.addParameter("l", 100, positive=True)
        self.addParameter("w", 400, positive=True)
    def assemble(self):
        # Build one l-by-w face and publish it plus its four edges as ports.
        dx = self.getParameter("l")
        dy = self.getParameter("w")
        self.addFace(Rect("r", dx, dy))
        self.place()
        self.addInterface("face", FacePort(self, "r"))
        # Edge ports: per the names below, e0..e3 are bottom/right/top/left.
        self.addInterface("b", EdgePort(self, "e0"))
        self.addInterface("r", EdgePort(self, "e1"))
        self.addInterface("t", EdgePort(self, "e2"))
        self.addInterface("l", EdgePort(self, "e3"))
if __name__ == "__main__":
h = Rectangle()
#h._make_test()
|
[
"christian.warloe@gmail.com"
] |
christian.warloe@gmail.com
|
96eaba8baa60786fa762b5a9ed86e115dfb96fb2
|
b5ba12d4dcb240ba6069964380f6a3aede79f448
|
/mixins/simulation.py
|
7ccaefcd9da8089d5c296d7cfa10fab98b594edc
|
[] |
no_license
|
70-6C-65-61-73-75-72-65h/erp
|
9e1a6f20a15d16794043f583022b1e04a9435b20
|
0e088c767d0d0c0e5515be703ed71252d55b70d9
|
refs/heads/master
| 2022-03-27T21:12:52.305257
| 2019-12-17T15:41:59
| 2019-12-17T15:41:59
| 224,333,874
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 403
|
py
|
# from datetime import datetime
# # from string to datetime: time.strptime
# # from datetime to string: time.strftime
# def today():
# # simulational date_today:
# def datetime_date_today():
# """ only date returned """
# month, day, year = today()
# datetime_str = f'{month}/{day}/{year}'
# datetime_object = datetime.strptime(datetime_str, '%m/%d/%y')
# return datetime_object
|
[
"max.ulshin.max@istat.com.ua"
] |
max.ulshin.max@istat.com.ua
|
bc5ad557d4f626a81e3b4e15f4bf084bb239d1a7
|
a2d36e471988e0fae32e9a9d559204ebb065ab7f
|
/huaweicloud-sdk-vod/huaweicloudsdkvod/v1/model/show_asset_detail_request.py
|
310a07633cd81850d55c262f5845bd24add26eb3
|
[
"Apache-2.0"
] |
permissive
|
zhouxy666/huaweicloud-sdk-python-v3
|
4d878a90b8e003875fc803a61414788e5e4c2c34
|
cc6f10a53205be4cb111d3ecfef8135ea804fa15
|
refs/heads/master
| 2023-09-02T07:41:12.605394
| 2021-11-12T03:20:11
| 2021-11-12T03:20:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,328
|
py
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ShowAssetDetailRequest:
    """Request model for querying the details of a single VOD media asset.

    Attributes:
        openapi_types (dict): maps attribute name -> attribute type.
        attribute_map (dict): maps attribute name -> JSON key in the definition.
    """
    # Attribute names whose values are masked as "****" in to_dict() output.
    sensitive_list = []
    openapi_types = {
        'asset_id': 'str',
        'categories': 'list[str]'
    }
    attribute_map = {
        'asset_id': 'asset_id',
        'categories': 'categories'
    }
    def __init__(self, asset_id=None, categories=None):
        """ShowAssetDetailRequest - a model defined in huaweicloud sdk"""
        self._asset_id = None
        self._categories = None
        self.discriminator = None
        # asset_id is always assigned; categories stays unset when None.
        self.asset_id = asset_id
        if categories is not None:
            self.categories = categories
    @property
    def asset_id(self):
        """Gets the asset_id of this ShowAssetDetailRequest.

        Media asset ID.

        :return: The asset_id of this ShowAssetDetailRequest.
        :rtype: str
        """
        return self._asset_id
    @asset_id.setter
    def asset_id(self, asset_id):
        """Sets the asset_id of this ShowAssetDetailRequest.

        Media asset ID.

        :param asset_id: The asset_id of this ShowAssetDetailRequest.
        :type: str
        """
        self._asset_id = asset_id
    @property
    def categories(self):
        """Gets the categories of this ShowAssetDetailRequest.

        Types of information to query. Empty means query everything;
        otherwise one or more of: base_info (basic media info),
        transcode_info (transcoding results), thumbnail_info (snapshot
        results), review_info (review results).

        :return: The categories of this ShowAssetDetailRequest.
        :rtype: list[str]
        """
        return self._categories
    @categories.setter
    def categories(self, categories):
        """Sets the categories of this ShowAssetDetailRequest.

        Types of information to query; see the getter for the allowed values.

        :param categories: The categories of this ShowAssetDetailRequest.
        :type: list[str]
        """
        self._categories = categories
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Serialize each element, recursing into nested models.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Recurse into dict values that are themselves models.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Mask attributes declared sensitive; copy the rest verbatim.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            # Python 2 only: force UTF-8 default encoding before dumping.
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
    def __repr__(self):
        """For `print`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ShowAssetDetailRequest):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
5d5b5c72b46a23b4384971602e86d7719b885892
|
b8bde9a346685e1428a8284f7ffb14f15e35fb78
|
/deploy/pinax.fcgi
|
43f92ff0c8e98da7b3b5d94b6afd6d72456e3420
|
[] |
no_license
|
bhaugen/pinax-groups-experiments
|
9302762c8e7379f067385a7280ef9af4dc4c5e8f
|
d520ccbfdb8228e10b6e547df6f64106caa6f0ec
|
refs/heads/master
| 2020-04-05T22:49:04.750605
| 2009-11-13T19:36:20
| 2009-11-13T19:36:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 535
|
fcgi
|
# pinax.fcgi is configured to live in projects/pinax_groups/deploy.
import os
import sys
from os.path import abspath, dirname, join
# NOTE(review): addsitedir is imported but never used -- confirm before removing.
from site import addsitedir
# Make the directory two levels up (the projects root) importable.
sys.path.insert(0, abspath(join(dirname(__file__), "../../")))
from django.conf import settings
# Point Django at this project's settings (settings is lazy, so this is in time).
os.environ["DJANGO_SETTINGS_MODULE"] = "pinax_groups.settings"
# Expose both the Pinax bundled apps and the project's own apps.
sys.path.insert(0, join(settings.PINAX_ROOT, "apps"))
sys.path.insert(0, join(settings.PROJECT_ROOT, "apps"))
from django.core.servers.fastcgi import runfastcgi
# Serve the site over FastCGI in the foreground using threads.
runfastcgi(method="threaded", daemonize="false")
|
[
"bob.haugen@gmail.com"
] |
bob.haugen@gmail.com
|
5756338cb6fc8c1265dcba6437dce7333023f4e4
|
60a831fb3c92a9d2a2b52ff7f5a0f665d4692a24
|
/IronPythonStubs/release/stubs.min/System/Windows/Forms/__init___parts/RichTextBoxSelectionTypes.py
|
9924fd6d966d8eeaba9fa14927670259ceddad2d
|
[
"MIT"
] |
permissive
|
shnlmn/Rhino-Grasshopper-Scripts
|
a9411098c5d1bbc55feb782def565d535b27b709
|
0e43c3c1d09fb12cdbd86a3c4e2ba49982e0f823
|
refs/heads/master
| 2020-04-10T18:59:43.518140
| 2020-04-08T02:49:07
| 2020-04-08T02:49:07
| 161,219,695
| 11
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,054
|
py
|
class RichTextBoxSelectionTypes(Enum,IComparable,IFormattable,IConvertible):
    """
    Specifies the type of selection in a System.Windows.Forms.RichTextBox control.
    enum (flags) RichTextBoxSelectionTypes,values: Empty (0),MultiChar (4),MultiObject (8),Object (2),Text (1)
    """
    # Auto-generated IronPython stub: every body is a placeholder 'pass';
    # the real implementations are supplied by the .NET runtime.
    def __eq__(self,*args):
        """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
        pass
    def __format__(self,*args):
        """ __format__(formattable: IFormattable,format: str) -> str """
        pass
    def __ge__(self,*args):
        pass
    def __gt__(self,*args):
        pass
    def __init__(self,*args):
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    def __le__(self,*args):
        pass
    def __lt__(self,*args):
        pass
    def __ne__(self,*args):
        pass
    def __reduce_ex__(self,*args):
        pass
    def __str__(self,*args):
        pass
    # Flag members; numeric values (see class docstring) are bound at runtime.
    Empty=None
    MultiChar=None
    MultiObject=None
    Object=None
    Text=None
    value__=None
|
[
"magnetscoil@gmail.com"
] |
magnetscoil@gmail.com
|
9091f98df3dff4ab938bd0ab9d306ef2b2ca9621
|
f6f15809ac70089ef4cfb1ade40e2dc58d239f81
|
/test/functional/data/invalid_txs.py
|
1f19ffe59a0e3a5e593440e7030364022a6315d2
|
[
"MIT"
] |
permissive
|
lamyaim/bitgesell
|
fcc96f6765d3907ce923f411a1b2c6c4de9d55d6
|
64c24348f1ba8788fbffaf663b3df38d9b49a5d1
|
refs/heads/master
| 2023-04-30T08:16:40.735496
| 2020-12-10T05:23:08
| 2020-12-10T05:23:08
| 369,859,996
| 1
| 0
|
MIT
| 2021-05-22T16:50:56
| 2021-05-22T16:48:32
| null |
UTF-8
|
Python
| false
| false
| 7,089
|
py
|
#!/usr/bin/env python3
# Copyright (c) 2015-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Templates for constructing various sorts of invalid transactions.
These templates (or an iterator over all of them) can be reused in different
contexts to test using a number of invalid transaction types.
Hopefully this makes it easier to get coverage of a full variety of tx
validation checks through different interfaces (AcceptBlock, AcceptToMemPool,
etc.) without repeating ourselves.
Invalid tx cases not covered here can be found by running:
$ diff \
<(grep -IREho "bad-txns[a-zA-Z-]+" src | sort -u) \
<(grep -IEho "bad-txns[a-zA-Z-]+" test/functional/data/invalid_txs.py | sort -u)
"""
import abc
from test_framework.messages import CTransaction, CTxIn, CTxOut, COutPoint
from test_framework import script as sc
from test_framework.blocktools import create_tx_with_script, MAX_BLOCK_SIGOPS
from test_framework.script import (
CScript,
OP_CAT,
OP_SUBSTR,
OP_LEFT,
OP_RIGHT,
OP_INVERT,
OP_AND,
OP_OR,
OP_XOR,
OP_2MUL,
OP_2DIV,
OP_MUL,
OP_DIV,
OP_MOD,
OP_LSHIFT,
OP_RSHIFT
)
basic_p2sh = sc.CScript([sc.OP_HASH160, sc.hash160(sc.CScript([sc.OP_0])), sc.OP_EQUAL])
class BadTxTemplate(metaclass=abc.ABCMeta):
    """Allows simple construction of a certain kind of invalid tx. Base class to be subclassed.

    Fixed: the old `__metaclass__ = abc.ABCMeta` attribute is Python 2 syntax
    and is inert under Python 3 (this file's interpreter), so get_tx was never
    actually enforced as abstract; the metaclass keyword restores enforcement.
    """
    # The expected error code given by BGLd upon submission of the tx.
    reject_reason = ""
    # Only specified if it differs from mempool acceptance error.
    block_reject_reason = ""
    # Do we expect to be disconnected after submitting this tx?
    expect_disconnect = False
    # Is this tx considered valid when included in a block, but not for acceptance into
    # the mempool (i.e. does it violate policy but not consensus)?
    valid_in_block = False

    def __init__(self, *, spend_tx=None, spend_block=None):
        # Spend the block's coinbase when a block is given, else the given tx.
        self.spend_tx = spend_block.vtx[0] if spend_block else spend_tx
        # Total value available across the spent tx's outputs.
        self.spend_avail = sum(o.nValue for o in self.spend_tx.vout)
        # A well-formed input spending output 0 of spend_tx.
        self.valid_txin = CTxIn(COutPoint(self.spend_tx.sha256, 0), b"", 0xffffffff)

    @abc.abstractmethod
    def get_tx(self, *args, **kwargs):
        """Return a CTransaction that is invalid per the subclass."""
        pass
class OutputMissing(BadTxTemplate):
    """Transaction with a valid input but an empty vout list."""
    reject_reason = "bad-txns-vout-empty"
    expect_disconnect = True

    def get_tx(self):
        txn = CTransaction()
        txn.vin.append(self.valid_txin)
        txn.calc_sha256()
        return txn
class InputMissing(BadTxTemplate):
    """Completely blank transaction: no inputs at all.

    An entirely empty tx is used so it deserializes as a non-witness
    transaction; otherwise the "surpufluous witness" check would fire
    during deserialization before the input-count check we want to hit.
    """
    reject_reason = "bad-txns-vin-empty"
    expect_disconnect = True

    def get_tx(self):
        txn = CTransaction()
        txn.calc_sha256()
        return txn
# This policy check prevents exploiting the lack of merkle tree depth
# commitment (CVE-2017-12842).
class SizeTooSmall(BadTxTemplate):
    """Transaction below the minimum serialized size accepted by policy."""
    reject_reason = "tx-size-small"
    expect_disconnect = False
    valid_in_block = True

    def get_tx(self):
        txn = CTransaction()
        txn.vin.append(self.valid_txin)
        # Single anyone-can-spend zero-value output keeps the tx tiny.
        txn.vout.append(CTxOut(0, sc.CScript([sc.OP_TRUE])))
        txn.calc_sha256()
        return txn
class BadInputOutpointIndex(BadTxTemplate):
    # Won't be rejected - nonexistent outpoint index is treated as an orphan since the coins
    # database can't distinguish between spent outpoints and outpoints which never existed.
    reject_reason = None
    expect_disconnect = False
    def get_tx(self):
        # Point at an output index well past the end of the spent tx.
        # NOTE(review): the index is derived from len(vin), not len(vout);
        # harmless here (still out of range) but looks like a typo -- confirm.
        num_indices = len(self.spend_tx.vin)
        bad_idx = num_indices + 100
        tx = CTransaction()
        tx.vin.append(CTxIn(COutPoint(self.spend_tx.sha256, bad_idx), b"", 0xffffffff))
        tx.vout.append(CTxOut(0, basic_p2sh))
        tx.calc_sha256()
        return tx
class DuplicateInput(BadTxTemplate):
    """Spends the same outpoint twice within one transaction."""
    reject_reason = 'bad-txns-inputs-duplicate'
    expect_disconnect = True

    def get_tx(self):
        txn = CTransaction()
        # Append the identical input object twice to trigger the dup check.
        for _ in range(2):
            txn.vin.append(self.valid_txin)
        txn.vout.append(CTxOut(1, basic_p2sh))
        txn.calc_sha256()
        return txn
class NonexistentInput(BadTxTemplate):
    """First input references a txid that does not exist."""
    reject_reason = None  # Added as an orphan tx.
    expect_disconnect = False

    def get_tx(self):
        txn = CTransaction()
        # spend_tx.sha256 + 1 is (with overwhelming probability) unknown.
        missing = CTxIn(COutPoint(self.spend_tx.sha256 + 1, 0), b"", 0xffffffff)
        txn.vin.append(missing)
        txn.vin.append(self.valid_txin)
        txn.vout.append(CTxOut(1, basic_p2sh))
        txn.calc_sha256()
        return txn
class SpendTooMuch(BadTxTemplate):
    """Outputs claim one unit more than the inputs provide."""
    reject_reason = 'bad-txns-in-belowout'
    expect_disconnect = True

    def get_tx(self):
        too_much = self.spend_avail + 1
        return create_tx_with_script(
            self.spend_tx, 0, script_pub_key=basic_p2sh, amount=too_much)
class SpendNegative(BadTxTemplate):
    """Single output carrying a negative value."""
    reject_reason = 'bad-txns-vout-negative'
    expect_disconnect = True

    def get_tx(self):
        # amount=-1 is the whole point: a vout below zero.
        return create_tx_with_script(self.spend_tx, 0, amount=-1)
class InvalidOPIFConstruction(BadTxTemplate):
    reject_reason = "mandatory-script-verify-flag-failed (Invalid OP_IF construction)"
    expect_disconnect = True
    valid_in_block = True
    def get_tx(self):
        # A scriptSig of 35 repeated 0x64 bytes yields unbalanced conditionals,
        # which triggers the "Invalid OP_IF construction" script failure.
        # (0x64 is presumably OP_NOTIF -- confirm against the opcode table.)
        return create_tx_with_script(
            self.spend_tx, 0, script_sig=b'\x64' * 35,
            amount=(self.spend_avail // 2))
class TooManySigops(BadTxTemplate):
    """Output script packed with MAX_BLOCK_SIGOPS CHECKSIG operations."""
    reject_reason = "bad-txns-too-many-sigops"
    block_reject_reason = "bad-blk-sigops, out-of-bounds SigOpCount"
    expect_disconnect = False

    def get_tx(self):
        checksig_spam = sc.CScript([sc.OP_CHECKSIG] * MAX_BLOCK_SIGOPS)
        return create_tx_with_script(
            self.spend_tx, 0, script_pub_key=checksig_spam, amount=1)
def getDisabledOpcodeTemplate(opcode):
    """ Creates disabled opcode tx template class"""
    def get_tx(self):
        tx = CTransaction()
        vin = self.valid_txin
        # NOTE(review): this mutates the shared self.valid_txin in place
        # (no copy) -- confirm later reuse of valid_txin is not affected.
        vin.scriptSig = CScript([opcode])
        tx.vin.append(vin)
        tx.vout.append(CTxOut(1, basic_p2sh))
        tx.calc_sha256()
        return tx
    # Dynamically build a BadTxTemplate subclass named after the opcode.
    return type('DisabledOpcode_' + str(opcode), (BadTxTemplate,), {
        'reject_reason': "disabled opcode",
        'expect_disconnect': True,
        'get_tx': get_tx,
        'valid_in_block' : True
    })
# Disabled opcode tx templates (CVE-2010-5137)
# One generated BadTxTemplate subclass per opcode disabled after CVE-2010-5137.
DisabledOpcodeTemplates = [getDisabledOpcodeTemplate(opcode) for opcode in [
    OP_CAT,
    OP_SUBSTR,
    OP_LEFT,
    OP_RIGHT,
    OP_INVERT,
    OP_AND,
    OP_OR,
    OP_XOR,
    OP_2MUL,
    OP_2DIV,
    OP_MUL,
    OP_DIV,
    OP_MOD,
    OP_LSHIFT,
    OP_RSHIFT]]
def iter_all_templates():
    """Return every direct BadTxTemplate subclass defined in this module
    (includes the dynamically generated disabled-opcode templates)."""
    return BadTxTemplate.__subclasses__()
|
[
"wuemma@protonmail.com"
] |
wuemma@protonmail.com
|
0fee4123dd316b974c3fdd92e1ace45e6046c0e7
|
1f40a08ee85ef6f78384e6f6f53bcf3f86b8c44b
|
/shorten/app/views.py
|
fec1ecdf840fbfdd7d0588f916a668b2701fdb4d
|
[] |
no_license
|
infsolution/EncurtUrl
|
bff4543fb17f3c2a6853c64abc24d307abcd04bf
|
0f6d8aa23a2498a8bf5575797db9a5a8eb855403
|
refs/heads/master
| 2020-05-14T09:31:39.265337
| 2019-09-28T17:44:25
| 2019-09-28T17:44:25
| 181,741,563
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,523
|
py
|
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator, InvalidPage
from django.shortcuts import render, redirect
from django.http import JsonResponse
from rest_framework import generics
from .models import *
from .forms import *
def index(request):
    """Render the landing page with the logged-in profile (or None)."""
    context = {
        "title_page": "O melhor encurtador",
        "perfil_logado": get_perfil_logado(request),
    }
    return render(request, 'app/index.html', context)
def get_perfil_logado(request):
    """Return the Perfil linked to the current user, or None if unavailable."""
    try:
        return Perfil.objects.get(user=request.user)
    except Exception:
        # Anonymous user / missing profile: treat as "not logged in".
        return None
def shorten(request):
    """Create a Shortened record from ?url=, honoring private/preview flags."""
    if request.GET.get('url'):
        short = Shortened(perfil=get_perfil_logado(request), url_user=request.GET.get('url'))
        short.shorten()
        # Optional flags: generate an access code and/or a preview page.
        if request.GET.getlist('private'):
            short.get_private_code()
        if request.GET.getlist('preview'):
            short.preview=True
            short.preview_message = request.GET.get('preview_msg')
        short.save()
        return render(request, 'app/showurl.html',{"url_short":short.url_shortened,"perfil_logado":get_perfil_logado(request),
        "title_page":"TShort: Sua url encurtada"})
    # No ?url= parameter: show the not-found page.
    return render(request,'app/urlnotfound.html', {"value":"Nenhuma url foi informada",
    "title_page":"Url Não encontrada","perfil_logado":get_perfil_logado(request)})
@login_required
def shotened_report(request):
    """Paginated list of the logged-in user's shortened URLs.

    NOTE(review): the view name has a typo ("shotened") but is presumably
    referenced from urls.py, so renaming would break routing -- confirm.
    """
    ITEMS_PER_PAGE = 5
    perfil_logado = get_perfil_logado(request)
    shorteneds = Shortened.objects.filter(perfil=perfil_logado)
    paginator = Paginator(shorteneds, ITEMS_PER_PAGE)
    page = request.GET.get('page',1)
    try:
        short_page = paginator.get_page(page)
    except InvalidPage:
        # NOTE(review): Paginator.get_page already falls back on invalid
        # pages (unlike .page); this handler looks unreachable -- confirm.
        short_page = paginator.get_page(1)
    return render(request, 'app/report.html',{"shorteneds":short_page,"perfil_logado":perfil_logado})
@login_required
def detail(request, shortened_id):
    """Show the report-detail page for a single shortened URL."""
    # NOTE(review): no ownership check -- any logged-in user can view any
    # id, and a missing id raises DoesNotExist (500) -- confirm intended.
    shorten = Shortened.objects.get(id=shortened_id)
    return render(request, 'app/report_detail.html', {'shorten':shorten, 'perfil_logado':get_perfil_logado(request)})
def go_to_url(request, shortened):
    """Resolve a shortened code: record the click, then redirect or gate access."""
    if request.method == 'GET':
        try:
            short = Shortened.objects.get(url_shortened=shortened)
            get_click(request,short)
        except Exception as e:
            return render(request,'app/urlnotfound.html', {"value":shortened,"error":e, "title_page":"Url Não encontrada"})
        if short.private_code != None:
            # Private link: ask for the access code instead of redirecting.
            return render(request, 'app/private_access.html',{"short":short})
        if short.preview:
            return render(request, 'app/preview.html',{'short':short, 'perfil_logado':get_perfil_logado(request)})
        return redirect(short.url_user)
    # NOTE(review): non-GET requests fall through and return None, which
    # Django rejects with an error -- confirm whether that is intended.
def create_user(request):
    """Sign-up view: validate form, check password confirmation, create User+Perfil."""
    if request.method == 'POST':
        form = UserModelForm(request.POST)
        if form.is_valid():
            # Only proceed when both submitted passwords match.
            if request.POST['last-password'] == request.POST['password']:
                user = User.objects.create_user(request.POST['username'], request.POST['email'], request.POST['last-password'])# validate that the passwords match
                perfil = Perfil(name=user.username, user=user)
                perfil.save()
                return render(request, 'app/add.html', {'form':UserModelForm(), 'alert_type':'success', 'msg_confirm':'Parabéns seu cadastro foi realizado.'})
            else:
                return render(request, 'app/add.html', {'form':UserModelForm(),'alert_type':'danger' , 'msg_confirm':'As senhas não são iguais'})
        # Invalid form: redisplay with the submitted data and an error banner.
        return render(request, 'app/add.html',{'form':UserModelForm(request.POST), 'alert_type':'danger','msg_confirm':'Ocorreu um erro ao realizar o cadastro.'})
    form = UserModelForm()
    return render(request, 'app/add.html', {"form":form})
'''def do_login(request):
if request.method == 'POST':
user = authenticate(username = request.POST['username'], password = request.POST['password'])
if user is not None:
login(request,user)
#return redirect('/app/'+str(user.id), user)
return redirect('index')
return render(request,'app/login.html' ,{"error_msg":"Usuário ou senha Invalidos"})
return render(request, 'app/login.html')'''
def do_logout(request):
    """Terminate the session and send the visitor to the login page."""
    logout(request)
    return redirect('/login/')
def access_private(request):
    """Validate the access code submitted for a private shortened URL."""
    if request.method == 'POST':
        short = Shortened.objects.get(url_shortened=request.POST['url_shortened'])
        if request.POST.get('private_code') == short.private_code:
            return redirect(short.url_user)
        return render(request, 'app/private_access.html',{"short":short, "error_msg":"Código inválido"})
    # NOTE(review): non-POST requests return None, which Django rejects -- confirm.
@login_required
def get_contatos(request):
    """Render the contacts page (login required)."""
    return render(request, 'app/contatos.html', {"perfil_logado":get_perfil_logado(request)})
def request_access(request, codeurl):
    """Let a visitor request access to a private shortened URL."""
    if request.method == 'POST':
        short = Shortened.objects.get(url_shortened=codeurl)
        # send_message is currently a stub that always returns True.
        if send_message(short):
            return render(request,'app/request_access.html',{"code":codeurl,"msg":"Sua solicitação foi enviada. Aquarde contato."})
    return render(request,'app/request_access.html',{"code":codeurl})
def send_message(short):
    # TODO: stub -- always reports success without sending anything.
    return True
def get_click(request, shortened):
    """Record one click/visit for the given Shortened instance."""
    click = Click(shortened=shortened)
    # Fixed: the old code printed the return of save(), which is always
    # None (Model.save() returns nothing) -- debug leftover removed.
    click.save()
def about(request):
    """Render the about page; include the profile when a user is logged in."""
    # Look the profile up once instead of twice (each call hits the DB).
    perfil_logado = get_perfil_logado(request)
    context = {"perfil_logado": perfil_logado} if perfil_logado else {}
    return render(request, 'app/about.html', context)
def help(request):
    """Render the help page; include the profile when a user is logged in.

    NOTE: the name shadows the builtin help(), but it is kept because the
    view is wired into the URL configuration.
    """
    # Look the profile up once instead of twice (each call hits the DB).
    perfil_logado = get_perfil_logado(request)
    context = {"perfil_logado": perfil_logado} if perfil_logado else {}
    return render(request, 'app/help.html', context)
def personalize(request, shortened_id):
    # TODO: not implemented yet.
    pass
def valid(request, url):
    """Check whether *url* is a registered shortened code.

    Returns JsonResponse({"result": true/false}).

    Fixed: the old code assigned a misspelled local (``rersult``) but
    returned ``result``, raising NameError on every call.
    """
    try:
        Shortened.objects.get(url_shortened=url)
        result = True
    except Exception:
        result = False
    return JsonResponse({'result': result})
#API#
|
[
"clsinfsolution@gmail.com"
] |
clsinfsolution@gmail.com
|
ce8203a37a0d73246f63399116e942a387aa6b19
|
38eb57300418e6f10433630437388f779ce50e09
|
/rbac_permission/rbac/servers/permission.py
|
4fc6af516966b9eb74fc2a0ed9e12b36cfe54973
|
[] |
no_license
|
SelfShadows/Django-Flask
|
f37839f763133f0d62bffad3128171c426a1c038
|
13e32d1c8aac1532b43323e1891c423fe78f2813
|
refs/heads/master
| 2021-01-04T12:31:18.018508
| 2020-02-14T16:29:27
| 2020-02-14T16:29:27
| 240,550,991
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,633
|
py
|
def init_session(request, user):
    """Store the user's id and permission structures in the session.

    Builds two session entries from the user's roles:
    - "permissions_dict": {group_id: {"urls": [...], "actions": [...]}}
    - "menu_permission_list": [(url, group_name)] for permissions whose
      action is "list" (used to render the menu).

    Fixed: removed the debug print() calls that ran on every login and the
    dead commented-out alternative implementation.
    """
    # Register the user id in the session.
    request.session["user_id"] = user.pk
    # Group every permission's url/action under its permission-group id.
    permissions = user.roles.all().values("permissions__url", "permissions__action", "permissions__group_id")
    permissions_dict = {}
    for item in permissions:
        group_id = item["permissions__group_id"]
        if group_id not in permissions_dict:
            # First permission seen for this group.
            permissions_dict[group_id] = {
                "urls": [item["permissions__url"]],
                "actions": [item["permissions__action"]],
            }
        else:
            permissions_dict[group_id]["urls"].append(item["permissions__url"])
            permissions_dict[group_id]["actions"].append(item["permissions__action"])
    request.session["permissions_dict"] = permissions_dict
    # Collect (url, group name) pairs for "list" actions to build the menu.
    ret = user.roles.all().values("permissions__url", "permissions__action", "permissions__group__name",)
    menu_permission_list = []
    for item in ret:
        if item["permissions__action"] == "list":
            menu_permission_list.append((item["permissions__url"], item["permissions__group__name"]))
    request.session["menu_permission_list"] = menu_permission_list
|
[
"870670791@qq.com"
] |
870670791@qq.com
|
7609d8654867171cc043ee30d5b4edc4ba5d48f2
|
ed8db15dad4236ada32c0355e032dc996266a271
|
/Advance_Python/8. Inheritance/4. ConstructorOverriding.py
|
207ba7b1ce97664ee242397920676842b2750dc9
|
[] |
no_license
|
mukund1985/Python-Tutotrial
|
a01e0c3ea77690c23c6f30ba1a157c450e5a53ed
|
bfcf0c81029ce2bee4aa855d90661df25cc94ef9
|
refs/heads/master
| 2021-05-21T15:41:18.018660
| 2020-11-04T02:20:30
| 2020-11-04T02:20:30
| 309,857,690
| 1
| 0
| null | 2020-11-04T02:14:37
| 2020-11-04T02:11:38
|
Python
|
UTF-8
|
Python
| false
| false
| 468
|
py
|
# Constructor Overriding
class Father:  # Parent class
    """Parent class: its constructor gives every instance default money."""

    def __init__(self):
        print("Father Class Constructor")
        # A subclass that defines its own __init__ replaces this entirely.
        self.money = 1000

    def show(self):
        print("Father Class Instance Method")
class Son(Father):  # Child class
    """Child class: overrides the inherited constructor completely
    (deliberately no super().__init__() -- that is the lesson here)."""

    def __init__(self):
        print("Son Class Constructor")
        self.money = 5000
        self.car = 'BMW'

    def disp(self):
        print("Son Class Instance Method")
s = Son()
print(s.money)
print(s.car)
s.disp()
s.show()
|
[
"mukund.pandey@gmail.com"
] |
mukund.pandey@gmail.com
|
bacf596e1202013a98cc40f7d2940d69b8a2e216
|
afa0d5a97925273f7fb0befef697d36020df5787
|
/packages/google-cloud-alloydb/samples/generated_samples/alloydb_v1_generated_alloy_db_admin_get_cluster_sync.py
|
eb296f332c1d78fa23a91559a1674b1100657a4a
|
[
"Apache-2.0"
] |
permissive
|
scooter4j/google-cloud-python
|
dc7ae1ba6a33a62a40b617b806ec8ed723046b8b
|
36b1cf08092d5c07c5971bb46edda7a9928166b1
|
refs/heads/master
| 2023-04-14T18:36:48.643436
| 2023-04-06T13:19:26
| 2023-04-06T13:19:26
| 188,338,673
| 0
| 0
| null | 2019-05-24T02:27:15
| 2019-05-24T02:27:14
| null |
UTF-8
|
Python
| false
| false
| 1,805
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for GetCluster
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-alloydb
# [START alloydb_v1_generated_AlloyDBAdmin_GetCluster_sync]
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import alloydb_v1
def sample_get_cluster():
    """Fetch one AlloyDB cluster by resource name and print the API response."""
    # Create a client
    client = alloydb_v1.AlloyDBAdminClient()
    # Initialize request argument(s)
    request = alloydb_v1.GetClusterRequest(
        name="name_value",
    )
    # Make the request
    response = client.get_cluster(request=request)
    # Handle the response
    print(response)
# [END alloydb_v1_generated_AlloyDBAdmin_GetCluster_sync]
|
[
"noreply@github.com"
] |
scooter4j.noreply@github.com
|
ba83ffc60ad253aed46ec0172ef01d949a01742e
|
57ddab24ba7860f8878c689f9fa22b0779d60157
|
/categorias/iniciante/uri1051.py
|
cb688fa9eb809d54eefeb058ecb54ada5c421f65
|
[] |
no_license
|
matheusfelipeog/uri-judge
|
ba1d32e50ad7239b331ad0e1181a1bffc6e61b41
|
0232be52da78fd67261c6d6a74eff3267d423afd
|
refs/heads/master
| 2021-07-03T02:32:13.395829
| 2021-01-29T18:32:35
| 2021-01-29T18:32:35
| 215,845,427
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 488
|
py
|
# -*- coding: utf-8 -*-
salario = float(input())
# Progressive tax brackets (URI 1051): the first R$2000 are exempt; the
# slice up to R$3000 is taxed at 8%, up to R$4500 at 18%, above that 28%.
if 0.0 <= salario <= 2000.00:
    print('Isento')
elif 2000.01 <= salario <= 3000.00:
    imposto = ((salario - 2000) * 0.08)
    print('R$ {:.2f}'.format(imposto))
elif 3000.01 <= salario <= 4500.00:
    # 8% over the full 2000-3000 slice plus 18% over the part above 3000.
    imposto = (1000 * 0.08) + ((salario - 3000) * 0.18)
    print('R$ {:.2f}'.format(imposto))
elif salario > 4500.00:
    # Adds the full 18% slice (3000-4500) and 28% on everything above 4500.
    imposto = (1000 * 0.08) + (1500 * 0.18) + ((salario - 4500) * 0.28)
    print('R$ {:.2f}'.format(imposto))
# NOTE(review): negative salaries and values strictly inside a boundary gap
# (e.g. 2000.005) print nothing -- presumably inputs are cents-precision.
|
[
"50463866+matheusfelipeog@users.noreply.github.com"
] |
50463866+matheusfelipeog@users.noreply.github.com
|
dba98931ab1055fbc8aa7f09f7f007a014124723
|
687928e5bc8d5cf68d543005bb24c862460edcfc
|
/nssrc/com/citrix/netscaler/nitro/resource/config/lb/lbvserver_dospolicy_binding.py
|
465c32d9a481652819921910b414eaf9319e4bd3
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"Python-2.0"
] |
permissive
|
mbs91/nitro
|
c6c81665d6abd04de8b9f09554e5e8e541f4a2b8
|
be74e1e177f5c205c16126bc9b023f2348788409
|
refs/heads/master
| 2021-05-29T19:24:04.520762
| 2015-06-26T02:03:09
| 2015-06-26T02:03:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,123
|
py
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class lbvserver_dospolicy_binding(base_resource) :
""" Binding class showing the dospolicy that can be bound to lbvserver.
"""
	def __init__(self) :
		# Bound DoS policy name, its priority, the owning vserver name,
		# and the resource count used by count-style API calls.
		self._policyname = ""
		self._priority = 0
		self._name = ""
		self.___count = 0
@property
def priority(self) :
"""Priority.
"""
try :
return self._priority
except Exception as e:
raise e
@priority.setter
def priority(self, priority) :
"""Priority.
"""
try :
self._priority = priority
except Exception as e:
raise e
@property
def policyname(self) :
"""Name of the policy bound to the LB vserver.
"""
try :
return self._policyname
except Exception as e:
raise e
@policyname.setter
def policyname(self, policyname) :
"""Name of the policy bound to the LB vserver.
"""
try :
self._policyname = policyname
except Exception as e:
raise e
@property
def name(self) :
"""Name for the virtual server. Must begin with an ASCII alphanumeric or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at sign (@), equal sign (=), and hyphen (-) characters. Can be changed after the virtual server is created.
CLI Users: If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my vserver" or 'my vserver'). .<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
"""Name for the virtual server. Must begin with an ASCII alphanumeric or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at sign (@), equal sign (=), and hyphen (-) characters. Can be changed after the virtual server is created.
CLI Users: If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my vserver" or 'my vserver'). .<br/>Minimum length = 1
"""
try :
self._name = name
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(lbvserver_dospolicy_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.lbvserver_dospolicy_binding
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
if (self.name) :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def get(cls, service, name) :
""" Use this API to fetch lbvserver_dospolicy_binding resources.
"""
try :
obj = lbvserver_dospolicy_binding()
obj.name = name
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, name, filter_) :
""" Use this API to fetch filtered set of lbvserver_dospolicy_binding resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = lbvserver_dospolicy_binding()
obj.name = name
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, name) :
""" Use this API to count lbvserver_dospolicy_binding resources configued on NetScaler.
"""
try :
obj = lbvserver_dospolicy_binding()
obj.name = name
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, name, filter_) :
""" Use this API to count the filtered set of lbvserver_dospolicy_binding resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = lbvserver_dospolicy_binding()
obj.name = name
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class Bindpoint:
REQUEST = "REQUEST"
RESPONSE = "RESPONSE"
class Labeltype:
reqvserver = "reqvserver"
resvserver = "resvserver"
policylabel = "policylabel"
class lbvserver_dospolicy_binding_response(base_response) :
def __init__(self, length=1) :
self.lbvserver_dospolicy_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.lbvserver_dospolicy_binding = [lbvserver_dospolicy_binding() for _ in range(length)]
|
[
"bensassimaha@gmail.com"
] |
bensassimaha@gmail.com
|
568adf917a33a914cba15a49c8c76eec78d9e70c
|
8fa8ded3772dd7a124c1bbb91fc109ed2b63574b
|
/mycelium/apps/data_import/ajax_backends.py
|
1db6a810c321f46ba03880b7a3f42cb1ee69194c
|
[] |
no_license
|
skoczen/mycelium
|
3642b0f5e5ea03d609a3e499c7ad68092101dce0
|
da0f169163f4dc93e2dc2b0d934abf4f18c18af0
|
refs/heads/master
| 2020-04-10T09:21:46.893254
| 2014-05-20T02:27:06
| 2014-05-20T02:27:06
| 2,114,887
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,301
|
py
|
from ajaxuploader.backends.s3 import S3UploadBackend
from django.core.files.storage import default_storage
from spreadsheets.spreadsheet import SpreadsheetAbstraction
import time
class DataImportUploadBackend(S3UploadBackend):
def update_filename(self, request, filename):
return "import/%s/%s.%s" % (request.account.pk, int(time.time()), filename, )
def upload_complete(self, request, filename, **kwargs):
self._pool.close()
self._pool.join()
self._mp.complete_upload()
# filename is a file at s3. Get it.
f = default_storage.open(filename, 'r')
# parse the file.
s = SpreadsheetAbstraction(request.account, f, request.import_type, filename=filename)
f.close()
# get the number of rows
num_rows = s.num_rows
# see if it has a header
header_row = []
has_header = s.has_header
if s.has_header:
header_row = s.header_row
# get the first five columns
first_rows = s.get_rows(0,8)
return_dict = {
'num_rows': num_rows,
'first_rows': first_rows,
'header_row': header_row,
'has_header': has_header,
'filename':filename,
}
return return_dict
|
[
"steven@quantumimagery.com"
] |
steven@quantumimagery.com
|
53d7a1c756ba1e532f3b3fc6092768370b3a8b40
|
8eac548c15cdabeb662c9af2ca67994f92c255ee
|
/词性标注&词性提取/Word_Marking_test.py
|
75c73dfd2590d58fbea3ac14a141dd71b9fe05c0
|
[] |
no_license
|
yaolinxia/Chinese-word-segmentation
|
f7de7317509dc7ed53bb40e5a1367206bd36abc1
|
42d619ec838fe2f8c98822b15c69c640972b984e
|
refs/heads/master
| 2021-07-06T19:52:58.916128
| 2019-04-15T14:08:54
| 2019-04-15T14:08:54
| 117,522,537
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,607
|
py
|
#!/usr/bin/env python
# _*_ coding:utf-8 _*_
#1.先分好词,存在一个字符数组里面
#2.遍历字符数组,进行词性标注
import sys
import glob
import os
import xml.dom.minidom
import jieba
import jieba.posseg as pseg
#遍历某个文件夹下所有xml文件,path为存放xml的文件夹路径
#词性标注
def WorkMark(path):
#textCut=jieba.cut(text,cut_all=False)
#词性标注
with open(path, encoding="utf-8") as file_object:
contents = file_object.read()
textCut = pseg.cut(contents)
for ele in textCut:
print(ele)
result = ''
for word in textCut:
result +=word+' '
print('%s' % (word))
print('sucess WorkMark')
return result
#路径path下的内容写入进text中
def write_WorkMark(path,text):
f=open(path,'w',encoding='utf-8')
f.write(text)
f.close()
print('success write_WorkMark')
if __name__=='__main__':
#path1 = r'G:\研究生\法律文书\民事一审测试集\民事一审测试集'
#输出的结果路径
path2 = r'H:\python-workspace\test-path\test_QW_1-29.txt'
#path3 = r'H:\python-workspace\\1-5-testWenShu\\stopword.dic'
#path4:提取的字段路径
path4 = r'H:\python-workspace\1-12-testWenShu\test_QW_addDic.txt'
#path4=r'C:\Users\LFK\Desktop\1.txt'
#text = read_XMLFile(path1)
#write_segmentFile(path4, text)
# text=read_txt(path4)
result = WorkMark(path4)
write_WorkMark(path2,result)
"""
import jieba.posseg as pseg
words = pseg.cut("我爱北京天安门")
for word,flag in words:
print('%s %s' % (word, flag))
"""
|
[
"18860976931@163.com"
] |
18860976931@163.com
|
9c12bac03eea6ed28261ea89f8c3810743a52f26
|
2ca88d41f1bb5042338faec50b2af11931db0bdd
|
/src/gluonts/nursery/tsbench/src/cli/analysis/__init__.py
|
b939423a365224aa385b570ac9ecec6deacdf291
|
[
"Apache-2.0"
] |
permissive
|
canerturkmen/gluon-ts
|
2f2d46f9b01f5ee07a51a11e822b1c72c2475caa
|
57ae07f571ff123eac04af077870c1f216f99d5c
|
refs/heads/master
| 2022-09-10T23:30:26.162245
| 2022-04-20T12:44:01
| 2022-04-20T12:44:01
| 192,873,578
| 1
| 2
|
Apache-2.0
| 2020-08-04T16:58:48
| 2019-06-20T07:43:07
|
Python
|
UTF-8
|
Python
| false
| false
| 848
|
py
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
from ._main import analysis
from .ensemble import ensemble # type: ignore
from .ensemble_recommender import ensemble_recommender # type: ignore
from .recommender import recommender # type: ignore
from .surrogate import surrogate # type: ignore
__all__ = ["analysis"]
|
[
"noreply@github.com"
] |
canerturkmen.noreply@github.com
|
94ef7ad99668a3d0c890a8be2fd256bf28ab9194
|
1b8530ef1c108e098edfa3755e96824b31d4a2ad
|
/scripts/fixup_recommender_v1beta1_keywords.py
|
c0fe44525fc4175047ea2372ca698f42c4445c7e
|
[
"Apache-2.0"
] |
permissive
|
renovate-bot/python-recommender
|
4b3d0b9e0332eab0f71bd044a6832b67fe6827fa
|
d0ff05f566d2a7bfe6c9f403252a833fe4bb776b
|
refs/heads/master
| 2023-06-08T00:27:33.316110
| 2021-08-18T13:40:32
| 2021-08-18T13:40:32
| 239,139,952
| 0
| 0
|
Apache-2.0
| 2020-02-08T13:52:09
| 2020-02-08T13:52:08
| null |
UTF-8
|
Python
| false
| false
| 6,468
|
py
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import libcst as cst
import pathlib
import sys
from typing import (Any, Callable, Dict, List, Sequence, Tuple)
def partition(
predicate: Callable[[Any], bool],
iterator: Sequence[Any]
) -> Tuple[List[Any], List[Any]]:
"""A stable, out-of-place partition."""
results = ([], [])
for i in iterator:
results[int(predicate(i))].append(i)
# Returns trueList, falseList
return results[1], results[0]
class recommenderCallTransformer(cst.CSTTransformer):
CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata')
METHOD_TO_PARAMS: Dict[str, Tuple[str]] = {
'get_insight': ('name', ),
'get_recommendation': ('name', ),
'list_insights': ('parent', 'page_size', 'page_token', 'filter', ),
'list_recommendations': ('parent', 'page_size', 'page_token', 'filter', ),
'mark_insight_accepted': ('name', 'etag', 'state_metadata', ),
'mark_recommendation_claimed': ('name', 'etag', 'state_metadata', ),
'mark_recommendation_failed': ('name', 'etag', 'state_metadata', ),
'mark_recommendation_succeeded': ('name', 'etag', 'state_metadata', ),
}
def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode:
try:
key = original.func.attr.value
kword_params = self.METHOD_TO_PARAMS[key]
except (AttributeError, KeyError):
# Either not a method from the API or too convoluted to be sure.
return updated
# If the existing code is valid, keyword args come after positional args.
# Therefore, all positional args must map to the first parameters.
args, kwargs = partition(lambda a: not bool(a.keyword), updated.args)
if any(k.keyword.value == "request" for k in kwargs):
# We've already fixed this file, don't fix it again.
return updated
kwargs, ctrl_kwargs = partition(
lambda a: not a.keyword.value in self.CTRL_PARAMS,
kwargs
)
args, ctrl_args = args[:len(kword_params)], args[len(kword_params):]
ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl))
for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS))
request_arg = cst.Arg(
value=cst.Dict([
cst.DictElement(
cst.SimpleString("'{}'".format(name)),
cst.Element(value=arg.value)
)
# Note: the args + kwargs looks silly, but keep in mind that
# the control parameters had to be stripped out, and that
# those could have been passed positionally or by keyword.
for name, arg in zip(kword_params, args + kwargs)]),
keyword=cst.Name("request")
)
return updated.with_changes(
args=[request_arg] + ctrl_kwargs
)
def fix_files(
in_dir: pathlib.Path,
out_dir: pathlib.Path,
*,
transformer=recommenderCallTransformer(),
):
"""Duplicate the input dir to the output dir, fixing file method calls.
Preconditions:
* in_dir is a real directory
* out_dir is a real, empty directory
"""
pyfile_gen = (
pathlib.Path(os.path.join(root, f))
for root, _, files in os.walk(in_dir)
for f in files if os.path.splitext(f)[1] == ".py"
)
for fpath in pyfile_gen:
with open(fpath, 'r') as f:
src = f.read()
# Parse the code and insert method call fixes.
tree = cst.parse_module(src)
updated = tree.visit(transformer)
# Create the path and directory structure for the new file.
updated_path = out_dir.joinpath(fpath.relative_to(in_dir))
updated_path.parent.mkdir(parents=True, exist_ok=True)
# Generate the updated source file at the corresponding path.
with open(updated_path, 'w') as f:
f.write(updated.code)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="""Fix up source that uses the recommender client library.
The existing sources are NOT overwritten but are copied to output_dir with changes made.
Note: This tool operates at a best-effort level at converting positional
parameters in client method calls to keyword based parameters.
Cases where it WILL FAIL include
A) * or ** expansion in a method call.
B) Calls via function or method alias (includes free function calls)
C) Indirect or dispatched calls (e.g. the method is looked up dynamically)
These all constitute false negatives. The tool will also detect false
positives when an API method shares a name with another method.
""")
parser.add_argument(
'-d',
'--input-directory',
required=True,
dest='input_dir',
help='the input directory to walk for python files to fix up',
)
parser.add_argument(
'-o',
'--output-directory',
required=True,
dest='output_dir',
help='the directory to output files fixed via un-flattening',
)
args = parser.parse_args()
input_dir = pathlib.Path(args.input_dir)
output_dir = pathlib.Path(args.output_dir)
if not input_dir.is_dir():
print(
f"input directory '{input_dir}' does not exist or is not a directory",
file=sys.stderr,
)
sys.exit(-1)
if not output_dir.is_dir():
print(
f"output directory '{output_dir}' does not exist or is not a directory",
file=sys.stderr,
)
sys.exit(-1)
if os.listdir(output_dir):
print(
f"output directory '{output_dir}' is not empty",
file=sys.stderr,
)
sys.exit(-1)
fix_files(input_dir, output_dir)
|
[
"noreply@github.com"
] |
renovate-bot.noreply@github.com
|
ebbfa4ce3921743a2cac5d388e06a808086b00de
|
71cc62fe3fec8441794a725b7ce3037dc2723107
|
/ifreewallpapers/apps/profile/views/profileviews.py
|
71d4bc4644c89e4257d2ab7d6120ca761ceb5375
|
[] |
no_license
|
tooxie/django-ifreewallpapers
|
bda676dc5a6c45329ad6763862fe696b3e0c354b
|
75d8f41a4c6aec5c1091203823c824c4223674a6
|
refs/heads/master
| 2020-05-21T12:50:36.907948
| 2011-01-19T04:28:33
| 2011-01-19T04:28:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 643
|
py
|
# coding=UTF-8
from profile.models import Profile
# from profile import settings as _settings
from utils.decorators import render_response
to_response = render_response('profile/')
# from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
# from django.core.urlresolvers import reverse
# from django.http import HttpResponseRedirect, Http404
from django.shortcuts import get_object_or_404
"""
@to_response
def overview(request, ):
return 'profile.html'
"""
@to_response
def public(request, slug):
profile = Profile.objects.get(slug=slug)
return 'public.html', {'profile': profile}
|
[
"alvaro@mourino.net"
] |
alvaro@mourino.net
|
742c4ddf5eaa9d24d8ab85cf042455635e024227
|
ff692d927c95f7337339599d523f986f720449f5
|
/plugins/init.py
|
692fec3901386220bb48bf4cea4ae5a20c1c2897
|
[] |
no_license
|
mwesterhof/pyjeeves
|
de567966636954aed7d88a5d51e74df85feeaba3
|
46b35f56056603330f7636a745e13fa045c884f1
|
refs/heads/master
| 2022-12-16T11:11:06.276555
| 2019-08-09T09:10:21
| 2019-08-09T09:10:21
| 296,265,384
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 268
|
py
|
import os
from plugin import BasePlugin
class Plugin(BasePlugin):
'''
Initialize an HQ in the current directory
'''
def run_command(self, args):
print('creating jeeves headquarters in {0}'.format(os.getcwd()))
os.makedirs('.jeeves')
|
[
"m.westerhof@lukkien.com"
] |
m.westerhof@lukkien.com
|
eeb6eb58ee42c5bc5f72743af750f3d566f3361e
|
aaa204ad7f134b526593c785eaa739bff9fc4d2a
|
/tests/providers/amazon/aws/hooks/test_glacier.py
|
4ed3f6aaa2e24f18b4e5a28d34007275140c31de
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
cfei18/incubator-airflow
|
913b40efa3d9f1fdfc5e299ce2693492c9a92dd4
|
ffb2078eb5546420864229cdc6ee361f89cab7bd
|
refs/heads/master
| 2022-09-28T14:44:04.250367
| 2022-09-19T16:50:23
| 2022-09-19T16:50:23
| 88,665,367
| 0
| 1
|
Apache-2.0
| 2021-02-05T16:29:42
| 2017-04-18T20:00:03
|
Python
|
UTF-8
|
Python
| false
| false
| 5,075
|
py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import unittest
from unittest import mock
from airflow.providers.amazon.aws.hooks.glacier import GlacierHook
CREDENTIALS = "aws_conn"
VAULT_NAME = "airflow"
JOB_ID = "1234abcd"
REQUEST_RESULT = {"jobId": "1234abcd"}
RESPONSE_BODY = {"body": "data"}
JOB_STATUS = {"Action": "", "StatusCode": "Succeeded"}
class TestAmazonGlacierHook(unittest.TestCase):
def setUp(self):
with mock.patch("airflow.providers.amazon.aws.hooks.glacier.GlacierHook.__init__", return_value=None):
self.hook = GlacierHook(aws_conn_id="aws_default")
@mock.patch("airflow.providers.amazon.aws.hooks.glacier.GlacierHook.get_conn")
def test_retrieve_inventory_should_return_job_id(self, mock_conn):
# Given
job_id = {"jobId": "1234abcd"}
# when
mock_conn.return_value.initiate_job.return_value = job_id
result = self.hook.retrieve_inventory(VAULT_NAME)
# then
mock_conn.assert_called_once_with()
assert job_id == result
@mock.patch("airflow.providers.amazon.aws.hooks.glacier.GlacierHook.get_conn")
def test_retrieve_inventory_should_log_mgs(self, mock_conn):
# given
job_id = {"jobId": "1234abcd"}
# when
with self.assertLogs() as log:
mock_conn.return_value.initiate_job.return_value = job_id
self.hook.retrieve_inventory(VAULT_NAME)
# then
self.assertEqual(
log.output,
[
'INFO:airflow.providers.amazon.aws.hooks.glacier.GlacierHook:'
f"Retrieving inventory for vault: {VAULT_NAME}",
'INFO:airflow.providers.amazon.aws.hooks.glacier.GlacierHook:'
f"Initiated inventory-retrieval job for: {VAULT_NAME}",
'INFO:airflow.providers.amazon.aws.hooks.glacier.GlacierHook:'
f"Retrieval Job ID: {job_id.get('jobId')}",
],
)
@mock.patch("airflow.providers.amazon.aws.hooks.glacier.GlacierHook.get_conn")
def test_retrieve_inventory_results_should_return_response(self, mock_conn):
# when
mock_conn.return_value.get_job_output.return_value = RESPONSE_BODY
response = self.hook.retrieve_inventory_results(VAULT_NAME, JOB_ID)
# then
mock_conn.assert_called_once_with()
assert response == RESPONSE_BODY
@mock.patch("airflow.providers.amazon.aws.hooks.glacier.GlacierHook.get_conn")
def test_retrieve_inventory_results_should_log_mgs(self, mock_conn):
# when
with self.assertLogs() as log:
mock_conn.return_value.get_job_output.return_value = REQUEST_RESULT
self.hook.retrieve_inventory_results(VAULT_NAME, JOB_ID)
# then
self.assertEqual(
log.output,
[
'INFO:airflow.providers.amazon.aws.hooks.glacier.GlacierHook:'
f"Retrieving the job results for vault: {VAULT_NAME}...",
],
)
@mock.patch("airflow.providers.amazon.aws.hooks.glacier.GlacierHook.get_conn")
def test_describe_job_should_return_status_succeeded(self, mock_conn):
# when
mock_conn.return_value.describe_job.return_value = JOB_STATUS
response = self.hook.describe_job(VAULT_NAME, JOB_ID)
# then
mock_conn.assert_called_once_with()
assert response == JOB_STATUS
@mock.patch("airflow.providers.amazon.aws.hooks.glacier.GlacierHook.get_conn")
def test_describe_job_should_log_mgs(self, mock_conn):
# when
with self.assertLogs() as log:
mock_conn.return_value.describe_job.return_value = JOB_STATUS
self.hook.describe_job(VAULT_NAME, JOB_ID)
# then
self.assertEqual(
log.output,
[
'INFO:airflow.providers.amazon.aws.hooks.glacier.GlacierHook:'
f"Retrieving status for vault: {VAULT_NAME} and job {JOB_ID}",
'INFO:airflow.providers.amazon.aws.hooks.glacier.GlacierHook:'
f"Job status: {JOB_STATUS.get('Action')}, code status: {JOB_STATUS.get('StatusCode')}",
],
)
|
[
"noreply@github.com"
] |
cfei18.noreply@github.com
|
59655e6a2e6c1bf1df975866337b053b89e1ae57
|
111866dd2150170e90e3717df008aa703d7ef30c
|
/filemanager/domain/__init__.py
|
7f428c6590b0064d5266fb7018c0cdcc07f789b4
|
[] |
no_license
|
arXiv/arxiv-filemanager
|
106c572a6551445a2109c279ce086b7c96a0bcd5
|
dfb71a40125324b1c1f4eb865c84cd9d2e512e6c
|
refs/heads/develop
| 2023-04-18T09:45:35.338067
| 2020-03-09T14:59:19
| 2020-03-09T14:59:19
| 113,456,994
| 5
| 6
| null | 2022-12-08T05:50:07
| 2017-12-07T13:55:34
|
PostScript
|
UTF-8
|
Python
| false
| false
| 388
|
py
|
"""Core concepts and constraints of the file manager service."""
from .uploads import UserFile, Workspace, IChecker, SourceLog, SourceType, \
IStorageAdapter, SourcePackage, ICheckableWorkspace, Readiness, \
Status, LockState
from .file_type import FileType
from .uploads import ICheckingStrategy
from .error import Error, Severity, Code
from .index import NoSuchFile, FileIndex
|
[
"brp53@cornell.edu"
] |
brp53@cornell.edu
|
7adeb154143a4cfd6b5b6ee2b93edaf9c86afaa2
|
b526aecc3aeb35c0931339ede80397f8f1561fbc
|
/src/dascasi/__init__.py
|
b262c04aa7f3b2df6609d02475bf132c1456c87d
|
[
"Apache-2.0"
] |
permissive
|
space-physics/dascasi
|
30e021976529dfc4072ea96181db8d9d1921a07c
|
4d72aa91e471a495566044c3fc387344dd12461f
|
refs/heads/main
| 2023-04-17T09:24:22.325605
| 2023-03-21T02:18:44
| 2023-03-21T02:30:15
| 51,016,067
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 768
|
py
|
# Copyright 2023 SciVision, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "3.0.0"
from .web import download
from .io import load, loadcal
from .hdf5 import save_hdf5
__all__ = ["download", "load", "loadcal", "save_hdf5"]
|
[
"scivision@users.noreply.github.com"
] |
scivision@users.noreply.github.com
|
e726984d4019bc6974ee4b2702b243d18c0669f7
|
73758dde83d1a1823c103e1a4ba71e7c95168f71
|
/nsd2006/devops/day02/local_mail.py
|
3533d97f07484696810b548f9f0931ac688dde15
|
[] |
no_license
|
tonggh220/md_5_nsd_notes
|
07ffdee7c23963a7a461f2a2340143b0e97bd9e1
|
a58a021ad4c7fbdf7df327424dc518f4044c5116
|
refs/heads/master
| 2023-07-02T01:34:38.798929
| 2021-05-12T08:48:40
| 2021-05-12T08:48:40
| 393,885,415
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 457
|
py
|
from email.mime.text import MIMEText
from email.header import Header
import smtplib
# 准备邮件, plain表示纯文本
message = MIMEText("python local email test.\n", 'plain', 'utf8') # 正文
message['From'] = Header('root', 'utf8')
message['To'] = Header('zhangsan', 'utf8')
message['Subject'] = Header('py test', 'utf8')
# 发送邮件
smtp = smtplib.SMTP()
smtp.connect('localhost')
smtp.sendmail('root', ['root', 'zhangsan'], message.as_bytes())
|
[
"zhangzg@tedu.cn"
] |
zhangzg@tedu.cn
|
aacc2ade3e3b635267e0895250241964852a07f3
|
51b20543e5ed606636bcde9fba329e5fa948de2e
|
/communityprofiles/census/remote_file.py
|
2bab92682c5bab5ea49d416570d119d3a9aaa99d
|
[
"MIT"
] |
permissive
|
216software/Profiles
|
b821112225e8522b7b558cab87ae1c12c68c653b
|
651da880a3d4295243205bdae4de88504edc91de
|
refs/heads/dev
| 2023-03-16T04:49:01.389186
| 2023-03-09T17:04:04
| 2023-03-09T17:04:04
| 59,139,518
| 3
| 0
| null | 2016-05-18T18:02:53
| 2016-05-18T18:02:53
| null |
UTF-8
|
Python
| false
| false
| 3,612
|
py
|
# for RemoteFileObject
from os import SEEK_SET, SEEK_CUR, SEEK_END
from time import time
from datetime import timedelta
from urlparse import urlparse, urljoin
from cStringIO import StringIO
from httplib import HTTPConnection
from urllib import urlopen
from zipfile import ZipFile
from itertools import izip
class RemoteFileObject:
""" Implement enough of this to be useful:
http://docs.python.org/release/2.5.2/lib/bltin-file-objects.html
Pull data from a remote URL with HTTP range headers.
"""
def __init__(self, url, verbose=False, block_size=(16 * 1024)):
self.verbose = verbose
# scheme://host/path;parameters?query#fragment
(scheme, host, path, parameters, query, fragment) = urlparse(url)
self.host = host
self.rest = path + (query and ('?' + query) or '')
self.offset = 0
self.length = self.get_length()
self.chunks = {}
self.block_size = block_size
self.start_time = time()
def get_length(self):
"""
"""
conn = HTTPConnection(self.host)
conn.request('GET', self.rest, headers={'Range': '0-1'})
length = int(conn.getresponse().getheader('content-length'))
if self.verbose:
print >> stderr, length, 'bytes in', basename(self.rest)
return length
def get_range(self, start, end):
"""
"""
headers = {'Range': 'bytes=%(start)d-%(end)d' % locals()}
conn = HTTPConnection(self.host)
conn.request('GET', self.rest, headers=headers)
return conn.getresponse().read()
def read(self, count=None):
""" Read /count/ bytes from the resource at the current offset.
"""
if count is None:
# to the end
count = self.length - self.offset
out = StringIO()
while count:
chunk_offset = self.block_size * (self.offset / self.block_size)
if chunk_offset not in self.chunks:
range = chunk_offset, min(self.length, self.offset + self.block_size) - 1
self.chunks[chunk_offset] = StringIO(self.get_range(*range))
if self.verbose:
loaded = float(self.block_size) * len(self.chunks) / self.length
expect = (time() - self.start_time) / loaded
remain = max(0, int(expect * (1 - loaded)))
print >> stderr, '%.1f%%' % min(100, 100 * loaded),
print >> stderr, 'of', basename(self.rest),
print >> stderr, 'with', timedelta(seconds=remain), 'to go'
chunk = self.chunks[chunk_offset]
in_chunk_offset = self.offset % self.block_size
in_chunk_count = min(count, self.block_size - in_chunk_offset)
chunk.seek(in_chunk_offset, SEEK_SET)
out.write(chunk.read(in_chunk_count))
count -= in_chunk_count
self.offset += in_chunk_count
out.seek(0)
return out.read()
def seek(self, offset, whence=SEEK_SET):
""" Seek to the specified offset.
/whence/ behaves as with other file-like objects:
http://docs.python.org/lib/bltin-file-objects.html
"""
if whence == SEEK_SET:
self.offset = offset
elif whence == SEEK_CUR:
self.offset += offset
elif whence == SEEK_END:
self.offset = self.length + offset
def tell(self):
return self.offset
|
[
"asmedrano@gmail.com"
] |
asmedrano@gmail.com
|
7058f2f37989ff337436d6ecf89c51ed574d82ee
|
7f33c02743fbfd18726ffef08924f528354372dd
|
/Python_Projects/python3_selfstudy/priklady_z_knihy/k04/digit_names.py
|
1167be7a4ec6d9440194cb8be9928866a345010e
|
[] |
no_license
|
zabojnikp/study
|
a524eb9c2265a73e1db0b5f0e76b359c123a397b
|
43424bfc6641cd8fa13ab119ce283fb460b4ffc1
|
refs/heads/master
| 2020-04-06T14:21:55.786353
| 2018-11-27T22:10:48
| 2018-11-27T22:10:48
| 157,538,244
| 0
| 0
| null | 2018-11-27T22:10:49
| 2018-11-14T11:24:20
|
Python
|
UTF-8
|
Python
| false
| false
| 1,390
|
py
|
#!/usr/bin/env python3
# Copyright (c) 2008-9 Qtrac Ltd. All rights reserved.
# This program or module is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version. It is provided for educational
# purposes and is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import sys
Language = "cs"
ENGLISH = {0: "zero", 1: "one", 2: "two", 3: "three", 4: "four",
5: "five", 6: "six", 7: "seven", 8: "eight", 9: "nine"}
CZECH = {0: "nula", 1: "jedna", 2: "dvě", 3: "tři", 4: "čtyři",
5: "pět", 6: "šest", 7: "sedm", 8: "osm", 9: "devět"}
def main():
if len(sys.argv) == 1 or sys.argv[1] in {"-h", "--help"}:
print("použití: {0} [en|cs] číslo".format(sys.argv[0]))
sys.exit()
args = sys.argv[1:]
if args[0] in {"en", "cs"}:
global Language
Language = args.pop(0)
print_digits(args.pop(0))
def print_digits(digits):
dictionary = ENGLISH if Language == "en" else CZECH
for digit in digits:
print(dictionary[int(digit)], end=" ")
print()
main()
|
[
"zabojnikova.petra@gmail.com"
] |
zabojnikova.petra@gmail.com
|
8b20ab0b23ca75e102d3f7c1bd8017bf3ac1b485
|
22dcbf9595c28279b681caac26e43113ce75de5c
|
/automl/cloud-client/import_dataset_test.py
|
35d23edc7e8fc745ed598a895c037a49b9cc7f90
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
jerry-enebeli/python-docs-samples
|
2e61672e9819405733c94ef824ba0b0f92b3e930
|
0d78724126ce25dd6203cfd2ee3467b88e5c27b9
|
refs/heads/master
| 2022-12-12T18:05:16.899492
| 2020-09-01T22:35:40
| 2020-09-01T22:35:40
| 292,189,370
| 1
| 0
|
Apache-2.0
| 2020-09-02T05:39:23
| 2020-09-02T05:39:22
| null |
UTF-8
|
Python
| false
| false
| 1,497
|
py
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import import_dataset
PROJECT_ID = os.environ["AUTOML_PROJECT_ID"]
BUCKET_ID = "{}-lcm".format(PROJECT_ID)
DATASET_ID = "TEN0000000000000000000"
def test_import_dataset(capsys):
# As importing a dataset can take a long time and only four operations can
# be run on a dataset at once. Try to import into a nonexistent dataset and
# confirm that the dataset was not found, but other elements of the request
# were valid.
try:
data = "gs://{}/sentiment-analysis/dataset.csv".format(BUCKET_ID)
import_dataset.import_dataset(PROJECT_ID, DATASET_ID, data)
out, _ = capsys.readouterr()
assert (
"The Dataset doesn't exist or is inaccessible for use with AutoMl."
in out
)
except Exception as e:
assert (
"The Dataset doesn't exist or is inaccessible for use with AutoMl."
in e.message
)
|
[
"noreply@github.com"
] |
jerry-enebeli.noreply@github.com
|
e658bf448865024182e1f4fcc107da7498d979d6
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_sponging.py
|
3002cc59058a485eaef0fe654fc8482c1eaff6ca
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 224
|
py
|
#calss header
class _SPONGING():
    """Vocabulary entry for the word "SPONGING" (a form of "sponge")."""

    def __init__(self,):
        self.name = "SPONGING"
        # BUG FIX: the original assigned the bare name ``sponge``, which is
        # undefined and raised NameError on instantiation.  The definition
        # source is the base word, stored like ``basic`` below.
        self.definitions = ['sponge']
        self.parents = []
        # NOTE(review): 'childen' looks like a typo for 'children', but the
        # attribute name is kept for compatibility with existing readers.
        self.childen = []
        self.properties = []
        self.jsondata = {}
        self.basic = ['sponge']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
eb5007714d3e169fc67ff7de612d97edbdde15ae
|
ca805265bbc8d9b3f5ccb8dd343524843fc0f776
|
/scaling/commands/bench_results_processer.py
|
d94add1e14ef3dfdd535d410b4a504c6f1e7200a
|
[] |
no_license
|
alenzhao/QIIME-Scaling
|
8dc7b4b99da404c016e59e48197b8f938df1cf14
|
29408a3a0ff2a74039f78a04fff831dabb23fa1a
|
refs/heads/master
| 2021-01-12T10:46:22.961035
| 2016-06-18T16:56:48
| 2016-06-18T16:56:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,825
|
py
|
#!/usr/bin/env python
from __future__ import division
__author__ = "Jose Antonio Navas Molina"
__copyright__ = "Copyright 2013, The QIIME Scaling Project"
__credits__ = ["Jose Antonio Navas Molina"]
__license__ = "BSD"
__version__ = "0.0.2-dev"
__maintainer__ = "Jose Antonio Navas Molina"
__email__ = "josenavasmolina@gmail.com"
__status__ = "Development"
from pyqi.core.command import (Command, CommandIn, CommandOut,
ParameterCollection)
from scaling.process_results import (process_benchmark_results, CompData)
from scaling.cluster_util import wait_on
class BenchResultsProcesser(Command):
    """Subclassing the pyqi.core.command.Command class"""
    BriefDescription = "Processes the benchmark suite results"
    LongDescription = ("Takes the benchmark suite output directory and "
                       "processes the benchmark measurements, creating plots "
                       "and collapsing results in a usable form.")
    # Declarative pyqi interface: inputs/outputs are described as
    # ParameterCollections and consumed by run() via **kwargs.
    CommandIns = ParameterCollection([
        CommandIn(Name='bench_results', DataType=list,
                  Description='List with the benchmark results',
                  Required=True),
        CommandIn(Name='job_ids', DataType=list,
                  Description='List of job ids to wait for if running in a '
                              'pbs cluster', Required=False)
    ])
    CommandOuts = ParameterCollection([
        CommandOut(Name="bench_data", DataType=CompData,
                   Description="Dictionary with the benchmark results"),
    ])

    def run(self, **kwargs):
        # Collapse the raw benchmark measurements into a CompData result.
        bench_results = kwargs['bench_results']
        job_ids = kwargs['job_ids']
        if job_ids:
            # Running on a PBS cluster: block until the listed jobs that
            # produce the results have finished.
            wait_on(job_ids)
        data = process_benchmark_results(bench_results)
        return {'bench_data': data}
|
[
"josenavasmolina@gmail.com"
] |
josenavasmolina@gmail.com
|
0d3e7da7d35dc0e85ff5002ba1b008ceca4e07f2
|
d489eb7998aa09e17ce8d8aef085a65f799e6a02
|
/lib/modules/powershell/situational_awareness/network/powerview/share_finder.py
|
b7eb7430bb04ecd40ca90099bbefd0f114040073
|
[
"MIT"
] |
permissive
|
fengjixuchui/invader
|
d36078bbef3d740f95930d9896b2d7dd7227474c
|
68153dafbe25e7bb821c8545952d0cc15ae35a3e
|
refs/heads/master
| 2020-07-21T19:45:10.479388
| 2019-09-26T11:32:38
| 2019-09-26T11:32:38
| 206,958,809
| 2
| 1
|
MIT
| 2019-09-26T11:32:39
| 2019-09-07T11:32:17
|
PowerShell
|
UTF-8
|
Python
| false
| false
| 6,300
|
py
|
from lib.common import helpers
class Module:
    """Empire/invader module wrapper for PowerView's Find-DomainShare.

    Holds the module metadata (``info``), its runtime-settable options
    (``options``), and builds the PowerShell script to task an agent with
    in :meth:`generate`.  NOTE: this file is Python 2 (print statements,
    ``dict.iteritems``).
    """

    def __init__(self, mainMenu, params=[]):
        # NOTE(review): mutable default ``params=[]`` — harmless here
        # because it is only iterated, never mutated, but fragile.
        self.info = {
            'Name': 'Find-DomainShare',
            'Author': ['@harmj0y'],
            'Description': ('Finds shares on machines in the domain. Part of PowerView.'),
            'Background' : True,
            'OutputExtension' : None,
            'NeedsAdmin' : False,
            'OpsecSafe' : True,
            'Language' : 'powershell',
            'MinLanguageVersion' : '2',
            'Comments': [
                'https://github.com/PowerShellMafia/PowerSploit/blob/dev/Recon/'
            ]
        }
        # any options needed by the module, settable during runtime
        self.options = {
            # format:
            #   value_name : {description, required, default_value}
            'Agent' : {
                'Description' : 'Agent to run module on.',
                'Required' : True,
                'Value' : ''
            },
            'ComputerName' : {
                'Description' : 'Hosts to enumerate.',
                'Required' : False,
                'Value' : ''
            },
            'ComputerLDAPFilter' : {
                'Description' : 'Host filter name to query AD for, wildcards accepted.',
                'Required' : False,
                'Value' : ''
            },
            'ComputerSearchBase' : {
                'Description' : 'Specifies the LDAP source to search through for computers',
                'Required' : False,
                'Value' : ''
            },
            'ComputerOperatingSystem' : {
                'Description' : 'Return computers with a specific operating system, wildcards accepted.',
                'Required' : False,
                'Value' : ''
            },
            'ComputerServicePack' : {
                'Description' : 'Return computers with the specified service pack, wildcards accepted.',
                'Required' : False,
                'Value' : ''
            },
            'ComputerSiteName' : {
                'Description' : 'Return computers in the specific AD Site name, wildcards accepted.',
                'Required' : False,
                'Value' : ''
            },
            'CheckShareAccess' : {
                'Description' : 'Switch. Only display found shares that the local user has access to.',
                'Required' : False,
                'Value' : ''
            },
            'Server' : {
                'Description' : 'Specifies an active directory server (domain controller) to bind to',
                'Required' : False,
                'Value' : ''
            },
            'SearchScope' : {
                'Description' : 'Specifies the scope to search under, Base/OneLevel/Subtree (default of Subtree)',
                'Required' : False,
                'Value' : ''
            },
            'ResultPageSize' : {
                'Description' : 'Specifies the PageSize to set for the LDAP searcher object.',
                'Required' : False,
                'Value' : ''
            },
            'ServerTimeLimit' : {
                'Description' : 'Specifies the maximum amount of time the server spends searching. Default of 120 seconds.',
                'Required' : False,
                'Value' : ''
            },
            'Tombstone' : {
                'Description' : 'Switch. Specifies that the search should also return deleted/tombstoned objects.',
                'Required' : False,
                'Value' : ''
            },
            'Delay' : {
                'Description' : 'Delay between enumerating hosts, defaults to 0.',
                'Required' : False,
                'Value' : ''
            },
            'Jitter' : {
                'Description' : 'Specifies the jitter (0-1.0) to apply to any specified -Delay, defaults to +/- 0.3.',
                'Required' : False,
                'Value' : ''
            },
            'Threads' : {
                'Description' : 'The maximum concurrent threads to execute.',
                'Required' : False,
                'Value' : ''
            }
        }
        # save off a copy of the mainMenu object to access external functionality
        # like listeners/agent handlers/etc.
        self.mainMenu = mainMenu
        for param in params:
            # parameter format is [Name, Value]
            option, value = param
            if option in self.options:
                self.options[option]['Value'] = value

    def generate(self, obfuscate=False, obfuscationCommand=""):
        # Build the PowerShell one-liner: stripped powerview.ps1 source
        # followed by a Find-DomainShare invocation with all set options.
        moduleName = self.info["Name"]
        # read in the common powerview.ps1 module source code
        moduleSource = self.mainMenu.installPath + "/data/module_source/situational_awareness/network/powerview.ps1"
        try:
            f = open(moduleSource, 'r')
        except:
            # NOTE(review): bare except — swallows everything (including
            # KeyboardInterrupt); kept as-is to preserve behavior.
            print helpers.color("[!] Could not read module source path at: " + str(moduleSource))
            return ""
        moduleCode = f.read()
        f.close()
        # get just the code needed for the specified function
        script = helpers.strip_powershell_comments(moduleCode)
        script += "\n" + moduleName + " "
        for option,values in self.options.iteritems():
            if option.lower() != "agent":
                if values['Value'] and values['Value'] != '':
                    if values['Value'].lower() == "true":
                        # if we're just adding a switch
                        script += " -" + str(option)
                    else:
                        script += " -" + str(option) + " " + str(values['Value'])
        # Pipe output to Out-String and append a completion marker so the
        # agent's output clearly terminates.
        script += ' | Out-String | %{$_ + \"`n\"};"`n'+str(moduleName)+' completed!"'
        if obfuscate:
            script = helpers.obfuscate(self.mainMenu.installPath, psScript=script, obfuscationCommand=obfuscationCommand)
        return script
|
[
"noreply@github.com"
] |
fengjixuchui.noreply@github.com
|
fa1ad882fd1595df3715ec3227356ed30c4c6017
|
fc212767c6c838360b62a3dcd8030a1dfcbf62fc
|
/muddery/utils/quest_handler.py
|
7d19069f731f83b3001d9318edf55756332a4a5f
|
[
"BSD-3-Clause"
] |
permissive
|
caibingcheng/muddery
|
24d6eba76358621736e6a3d66333361239c35472
|
dcbf55f4e1c18a2c69576fd0edcec4699c1519b9
|
refs/heads/master
| 2021-05-19T09:49:19.319735
| 2020-03-29T03:55:51
| 2020-03-29T03:55:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,984
|
py
|
"""
QuestHandler handles a character's quests.
"""
from django.conf import settings
from django.apps import apps
from django.core.exceptions import ObjectDoesNotExist
from evennia.utils import logger
from muddery.utils.builder import build_object
from muddery.statements.statement_handler import STATEMENT_HANDLER
from muddery.utils.localized_strings_handler import _
from muddery.utils.exception import MudderyError
from muddery.utils.game_settings import GAME_SETTINGS
from muddery.worlddata.dao.quest_dependencies_mapper import QUEST_DEPENDENCIES
from muddery.mappings.quest_status_set import QUEST_STATUS_SET
from muddery.mappings.typeclass_set import TYPECLASS
class QuestHandler(object):
    """
    Handles a character's quests.

    Quests in progress live in ``current_quests`` (a dict mapping a
    quest's key to its in-game quest object); completed quest keys are
    collected in the ``finished_quests`` set.  Both containers are the
    owner's persistent ``db`` attributes, so mutations are saved.
    """
    def __init__(self, owner):
        """
        Initialize handler

        Args:
            owner: (object) the character that owns these quests.
        """
        self.owner = owner
        self.current_quests = owner.db.current_quests
        self.finished_quests = owner.db.finished_quests

    def accept(self, quest_key):
        """
        Accept a quest.

        Args:
            quest_key: (string) quest's key

        Returns:
            None
        """
        # Already in progress — nothing to do.
        if quest_key in self.current_quests:
            return

        # Create quest object.
        new_quest = build_object(quest_key)
        if not new_quest:
            return

        new_quest.set_owner(self.owner)
        self.current_quests[quest_key] = new_quest

        self.owner.msg({"msg": _("Accepted quest {C%s{n.") % new_quest.get_name()})
        self.show_quests()
        self.owner.show_location()

    def remove_all(self):
        """
        Remove all quests.
        It will be called when quests' owner will be deleted.
        """
        # Iterate over a snapshot of the keys because entries are removed
        # while looping.
        for quest_key in list(self.current_quests.keys()):
            self.current_quests[quest_key].delete()
            del self.current_quests[quest_key]
        # BUG FIX: the original rebound ``self.current_quests`` to a list
        # (``[]``), which both changed the container's type (it is used as
        # a dict everywhere else in this class) and detached the handler
        # from the owner's persistent storage.  Emptying the dict in place
        # preserves both invariants.

    def give_up(self, quest_key):
        """
        Give up a quest.

        Args:
            quest_key: (string) quest's key

        Raises:
            MudderyError: giving up is disabled by game settings, or the
                quest is not in progress.

        Returns:
            None
        """
        if not GAME_SETTINGS.get("can_give_up_quests"):
            logger.log_tracemsg("Can not give up quests.")
            raise MudderyError(_("Can not give up this quest."))

        if quest_key not in self.current_quests:
            raise MudderyError(_("Can not find this quest."))

        self.current_quests[quest_key].delete()
        del self.current_quests[quest_key]
        if quest_key in self.finished_quests:
            self.finished_quests.remove(quest_key)

        self.show_quests()

    def turn_in(self, quest_key):
        """
        Turn in a quest.

        Args:
            quest_key: (string) quest's key

        Returns:
            None
        """
        if quest_key not in self.current_quests:
            return
        # BUG FIX: ``is_accomplished`` is a method; the original omitted
        # the call parentheses, so the guard tested the (always truthy)
        # bound method and unfinished quests could be turned in.
        if not self.current_quests[quest_key].is_accomplished():
            return

        # Get quest's name.
        name = self.current_quests[quest_key].get_name()

        # Call turn in function in the quest.
        self.current_quests[quest_key].turn_in()

        # Delete the quest.
        self.current_quests[quest_key].delete()
        del self.current_quests[quest_key]

        self.finished_quests.add(quest_key)

        self.owner.msg({"msg": _("Turned in quest {C%s{n.") % name})
        self.show_quests()
        self.owner.show_location()

    def get_accomplished_quests(self):
        """
        Get all quests that their objectives are accomplished.

        Returns:
            (set) keys of quests whose objectives are all met.
        """
        quests = set()
        for quest in self.current_quests:
            if self.current_quests[quest].is_accomplished():
                quests.add(quest)

        return quests

    def is_accomplished(self, quest_key):
        """
        Whether the character accomplished this quest or not.

        Args:
            quest_key: (string) quest's key

        Returns:
            (boolean) result
        """
        if quest_key not in self.current_quests:
            return False

        return self.current_quests[quest_key].is_accomplished()

    def is_not_accomplished(self, quest_key):
        """
        Whether this quest is in progress but not yet accomplished.

        Args:
            quest_key: (string) quest's key

        Returns:
            (boolean) result
        """
        if quest_key not in self.current_quests:
            return False

        return not self.current_quests[quest_key].is_accomplished()

    def is_finished(self, quest_key):
        """
        Whether the character finished this quest or not.

        Args:
            quest_key: (string) quest's key

        Returns:
            (boolean) result
        """
        return quest_key in self.finished_quests

    def is_in_progress(self, quest_key):
        """
        If the character is doing this quest.

        Args:
            quest_key: (string) quest's key

        Returns:
            (boolean) result
        """
        return quest_key in self.current_quests

    def can_provide(self, quest_key):
        """
        If can provide this quest to the owner.

        Args:
            quest_key: (string) quest's key

        Returns:
            (boolean) result
        """
        # A quest is available only if it is neither finished nor in
        # progress and all dependencies/conditions are satisfied.
        if self.is_finished(quest_key):
            return False

        if self.is_in_progress(quest_key):
            return False

        if not self.match_dependencies(quest_key):
            return False

        if not self.match_condition(quest_key):
            return False

        return True

    def match_dependencies(self, quest_key):
        """
        Check quest's dependencies

        Args:
            quest_key: (string) quest's key

        Returns:
            (boolean) result
        """
        for dep in QUEST_DEPENDENCIES.filter(quest_key):
            status = QUEST_STATUS_SET.get(dep.type)
            if not status.match(self.owner, dep.dependency):
                return False

        return True

    def match_condition(self, quest_key):
        """
        Check if the quest matches its condition.

        Args:
            quest_key: (string) quest's key

        Returns:
            (boolean) result
        """
        # Get quest's record.
        model_name = TYPECLASS("QUEST").model_name
        if not model_name:
            return False

        model_quest = apps.get_model(settings.WORLD_DATA_APP, model_name)

        try:
            record = model_quest.objects.get(key=quest_key)
            return STATEMENT_HANDLER.match_condition(record.condition, self.owner, None)
        except Exception as e:
            # Missing record or a broken condition statement counts as
            # "not available" rather than an error for the player.
            logger.log_errmsg("Can't get quest %s's condition: %s" % (quest_key, e))

        return False

    def show_quests(self):
        """
        Send quests to player.
        """
        quests = self.return_quests()
        self.owner.msg({"quests": quests})

    def return_quests(self):
        """
        Get quests' data.

        Returns:
            (list) one info dict per quest in progress.
        """
        quests = []
        for quest in self.current_quests.values():
            info = {"dbref": quest.dbref,
                    "name": quest.name,
                    "desc": quest.db.desc,
                    "objectives": quest.return_objectives(),
                    "accomplished": quest.is_accomplished()}
            quests.append(info)

        return quests

    def at_objective(self, object_type, object_key, number=1):
        """
        Called when the owner may complete some objectives.
        Call relative hooks.

        Args:
            object_type: (type) objective's type
            object_key: (string) object's key
            number: (int) objective's number

        Returns:
            None
        """
        status_changed = False
        for quest in self.current_quests.values():
            if quest.at_objective(object_type, object_key, number):
                status_changed = True
                if quest.is_accomplished():
                    self.owner.msg({"msg":
                        _("Quest {C%s{n's goals are accomplished.") % quest.name})

        # Only refresh the client's quest list when something changed.
        if status_changed:
            self.show_quests()
|
[
"luyijun999@gmail.com"
] |
luyijun999@gmail.com
|
96c76ae94d06dfc58e6363603425d800499d1a75
|
9df2fb0bc59ab44f026b0a2f5ef50c72b2fb2ceb
|
/sdk/ml/azure-ai-ml/azure/ai/ml/identity/_internal/pipeline.py
|
66a31affbd4140ddaf931c75a2cb7069bbb5a312
|
[
"LicenseRef-scancode-python-cwi",
"LGPL-2.1-or-later",
"PSF-2.0",
"LGPL-2.0-or-later",
"GPL-3.0-or-later",
"GPL-1.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"Python-2.0",
"MPL-2.0",
"LicenseRef-scancode-other-copyleft",
"HPND",
"ODbL-1.0",
"GPL-3.0-only",
"ZPL-2.1",
"MIT",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
openapi-env-test/azure-sdk-for-python
|
b334a2b65eeabcf9b7673879a621abb9be43b0f6
|
f61090e96094cfd4f43650be1a53425736bd8985
|
refs/heads/main
| 2023-08-30T14:22:14.300080
| 2023-06-08T02:53:04
| 2023-06-08T02:53:04
| 222,384,897
| 1
| 0
|
MIT
| 2023-09-08T08:38:48
| 2019-11-18T07:09:24
|
Python
|
UTF-8
|
Python
| false
| false
| 2,585
|
py
|
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from azure.ai.ml._user_agent import USER_AGENT
from azure.core.configuration import Configuration
from azure.core.pipeline import Pipeline
from azure.core.pipeline.policies import (
ContentDecodePolicy,
CustomHookPolicy,
DistributedTracingPolicy,
HeadersPolicy,
HttpLoggingPolicy,
NetworkTraceLoggingPolicy,
ProxyPolicy,
RetryPolicy,
UserAgentPolicy,
)
from azure.core.pipeline.transport import RequestsTransport
def _get_config(**kwargs):
    """Configuration common to a/sync pipelines.

    Returns an azure-core ``Configuration`` with every policy that is
    shared between the synchronous and asynchronous pipelines attached.
    The retry policy is intentionally left unset here: ``build_pipeline``
    and ``build_async_pipeline`` install the sync or async variant
    themselves.
    """
    config = Configuration(**kwargs)
    config.custom_hook_policy = CustomHookPolicy(**kwargs)
    config.headers_policy = HeadersPolicy(**kwargs)
    config.http_logging_policy = HttpLoggingPolicy(**kwargs)
    config.logging_policy = NetworkTraceLoggingPolicy(**kwargs)
    config.proxy_policy = ProxyPolicy(**kwargs)
    config.user_agent_policy = UserAgentPolicy(base_user_agent=USER_AGENT, **kwargs)
    return config
def _get_policies(config, _per_retry_policies=None, **kwargs):
    """Assemble the ordered policy list for a pipeline.

    The list order is the order policies run in the pipeline.
    ``_per_retry_policies`` are inserted immediately after the retry
    policy; NOTE(review): in azure-core, policies placed after the retry
    policy re-run on every retry attempt — confirm against the azure-core
    pipeline documentation before reordering anything here.
    """
    policies = [
        config.headers_policy,
        config.user_agent_policy,
        config.proxy_policy,
        ContentDecodePolicy(**kwargs),
        config.retry_policy,
    ]
    if _per_retry_policies:
        policies.extend(_per_retry_policies)
    policies.extend(
        [
            config.custom_hook_policy,
            config.logging_policy,
            DistributedTracingPolicy(**kwargs),
            config.http_logging_policy,
        ]
    )
    return policies
def build_pipeline(transport=None, policies=None, **kwargs):
    """Create a synchronous azure-core ``Pipeline``.

    When *policies* is not supplied, the default policy list is assembled
    with a synchronous ``RetryPolicy``; when *transport* is not supplied,
    the requests-based transport is used.
    """
    if not policies:
        configuration = _get_config(**kwargs)
        configuration.retry_policy = RetryPolicy(**kwargs)
        policies = _get_policies(configuration, **kwargs)
    return Pipeline(
        transport if transport else RequestsTransport(**kwargs),
        policies=policies,
    )
def build_async_pipeline(transport=None, policies=None, **kwargs):
    """Create an asynchronous azure-core ``AsyncPipeline``.

    Mirrors :func:`build_pipeline`, but installs an ``AsyncRetryPolicy``
    and an aiohttp-based transport.  The async imports stay local so the
    aio extras are only required when an async pipeline is actually built.
    """
    from azure.core.pipeline import AsyncPipeline

    if not policies:
        from azure.core.pipeline.policies import AsyncRetryPolicy

        configuration = _get_config(**kwargs)
        configuration.retry_policy = AsyncRetryPolicy(**kwargs)
        policies = _get_policies(configuration, **kwargs)
    if not transport:
        from azure.core.pipeline.transport import AioHttpTransport

        transport = AioHttpTransport(**kwargs)
    return AsyncPipeline(transport, policies=policies)
|
[
"noreply@github.com"
] |
openapi-env-test.noreply@github.com
|
e1a369dc2579d3d7f7b2687df356ca92d408e5ca
|
6699b8944b71e86725fdc17bb5f9cd69e254b4eb
|
/leetcode/1448.count-good-nodes-in-binary-tree/solution.py
|
93c087524ccb57bbc8d3bb206aa6f474859f9b57
|
[] |
no_license
|
jadesym/interview
|
6099e663090408f548b4f4b0b17ae90bb60a7d46
|
5b6eecedfa1c7e496bcfe852e2d3896e993ff16e
|
refs/heads/main
| 2023-01-07T21:56:59.063542
| 2022-12-30T20:13:34
| 2022-12-30T20:13:34
| 41,118,644
| 7
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 794
|
py
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    """LeetCode 1448: count "good" nodes — nodes whose value is >= every
    value on the path from the root down to them."""

    def goodNodes(self, root: TreeNode) -> int:
        """Return the number of good nodes in the tree rooted at *root*."""
        # -10**5 is below any node value, so the root always counts.
        return self.dfs(root, -(10 ** 5))

    def dfs(self, node: TreeNode, maxSoFar: int) -> int:
        """Count good nodes in *node*'s subtree, given the maximum value
        seen on the path from the root (exclusive of *node*)."""
        tally = 1 if maxSoFar <= node.val else 0
        path_max = max(maxSoFar, node.val)
        for child in (node.left, node.right):
            if child is not None:
                tally += self.dfs(child, path_max)
        return tally
|
[
"kfu@kfu-mn1.linkedin.biz"
] |
kfu@kfu-mn1.linkedin.biz
|
4c2902a924d3a9a14d643a543c10bb7afec70217
|
e043f008aaec14e006051c7609748729a78bef90
|
/tests/test_falconparser.py
|
e56ce14595d4071e3f0f9eb1c42050f17872770b
|
[
"MIT"
] |
permissive
|
DamianHeard/webargs
|
723f38789ae4be61247da2a94ab590e11c808da7
|
4bba0bb4ca7bef3d0c53fab8f9af632e9653b2ed
|
refs/heads/dev
| 2021-01-18T00:25:58.792302
| 2015-11-09T02:07:52
| 2015-11-09T02:07:52
| 45,103,859
| 0
| 0
| null | 2015-10-28T10:05:39
| 2015-10-28T10:05:39
| null |
UTF-8
|
Python
| false
| false
| 4,147
|
py
|
# -*- coding: utf-8 -*-
import json
import pytest
import falcon
import webtest
from webargs import fields
from webargs.falconparser import parser, use_args, use_kwargs
def use_args_hook(args, context_key='args', **kwargs):
    """Build a falcon ``before`` hook that parses *args* with webargs and
    stashes the parsed result on ``req.context[context_key]``."""
    def hook(req, resp, params):
        req.context[context_key] = parser.parse(args, req=req, **kwargs)
    return hook
@pytest.fixture()
def api():
    """Falcon API wired with one resource per webargs entry point
    (direct parse, use_args, use_kwargs, a before-hook, and an
    always-failing validator for error-handler tests)."""
    api_ = falcon.API()
    hello_args = {
        'name': fields.Str(required=True)
    }
    class ParseResource(object):
        # Each verb parses from a different set of locations.
        def on_get(self, req, resp):
            args = parser.parse(hello_args, req=req, locations=('query', 'headers', 'cookies'))
            resp.body = json.dumps(args)
        def on_post(self, req, resp):
            args = parser.parse(hello_args, req=req, locations=('form', ))
            resp.body = json.dumps(args)
        def on_put(self, req, resp):
            args = parser.parse(hello_args, req=req, locations=('json', ))
            resp.body = json.dumps(args)
    class UseArgsResource(object):
        @use_args(hello_args)
        def on_get(self, req, resp, args):
            resp.body = json.dumps(args)
    class UseArgsWithParamResource(object):
        @use_args(hello_args)
        def on_get(self, req, resp, args, _id):
            # URI template parameter arrives as a string; echo it as int.
            args['_id'] = int(_id)
            resp.body = json.dumps(args)
    class UseKwargsResource(object):
        @use_kwargs(hello_args)
        def on_get(self, req, resp, name):
            resp.body = json.dumps({'name': name})
    class AlwaysErrorResource(object):
        # Validator always fails -> parser must produce a 422.
        args = {'bad': fields.Field(validate=lambda x: False)}
        def on_get(self, req, resp):
            parser.parse(self.args, req=req)
    @falcon.before(use_args_hook(hello_args))
    class HookResource(object):
        def on_get(self, req, resp):
            # NOTE(review): ``resp.body`` is an attribute, not a callable —
            # this would raise if ever exercised; confirm intent.
            resp.body(req.context['args'])
    api_.add_route('/parse', ParseResource())
    api_.add_route('/use_args', UseArgsResource())
    api_.add_route('/use_args_with_param/{_id}', UseArgsWithParamResource())
    api_.add_route('/use_kwargs', UseKwargsResource())
    # NOTE(review): '/hook' is served by UseKwargsResource, not
    # HookResource — HookResource is never registered.  Looks unintended;
    # confirm before "fixing", since TestHookResource passes as-is.
    api_.add_route('/hook', UseKwargsResource())
    api_.add_route('/error', AlwaysErrorResource())
    return api_
@pytest.fixture()
def testapp(api):
    """WSGI test client wrapping the ``api`` fixture."""
    return webtest.TestApp(api)
class TestParseResource:
    """Round-trip the /parse endpoint through every supported location."""

    url = '/parse'

    def test_parse_querystring(self, testapp):
        response = testapp.get(self.url + '?name=Fred')
        assert response.json == {'name': 'Fred'}

    def test_parse_form(self, testapp):
        response = testapp.post(self.url, {'name': 'Fred'})
        assert response.json == {'name': 'Fred'}

    def test_parse_json(self, testapp):
        response = testapp.put_json(self.url, {'name': 'Fred'})
        assert response.json == {'name': 'Fred'}

    def test_parse_headers(self, testapp):
        response = testapp.get(self.url, headers={'name': 'Fred'})
        assert response.json == {'name': 'Fred'}

    def test_parsing_cookies(self, testapp):
        testapp.set_cookie('name', 'Fred')
        response = testapp.get(self.url)
        assert response.json == {'name': 'Fred'}
class TestErrorHandler:
    """Validation failures must surface as 422 with an ``errors`` body."""

    url = '/error'

    def test_error_handler_returns_422_response(self, testapp):
        response = testapp.get(self.url + '?bad=42', expect_errors=True)
        assert response.status_code == 422
        body = response.json
        assert 'errors' in body
        assert 'bad' in body['errors']
        assert body['errors']['bad'] == ['Invalid value.']
class TestUseArgsResource:
    """use_args should inject the parsed dict into the responder."""

    url = '/use_args'

    def test_parse_querystring(self, testapp):
        response = testapp.get(self.url + '?name=Fred')
        assert response.json == {'name': 'Fred'}
class TestUseArgsWithParamResource:
    """use_args must coexist with a URI template parameter."""

    url = '/use_args_with_param/42'

    def test_parse_querystring(self, testapp):
        response = testapp.get(self.url + '?name=Fred')
        assert response.json == {'name': 'Fred', '_id': 42}
class TestUseKwargsResource:
    """use_kwargs should pass parsed values as keyword arguments."""

    url = '/use_kwargs'

    def test_parse_querystring(self, testapp):
        response = testapp.get(self.url + '?name=Fred')
        assert response.json == {'name': 'Fred'}
class TestHookResource:
    """Exercise the '/hook' route.

    NOTE(review): the ``api`` fixture registers UseKwargsResource (not
    HookResource) under '/hook', so this hits the same handler as
    TestUseKwargsResource — confirm whether HookResource was intended.
    """

    url = '/hook'

    def test_parse_querystring(self, testapp):
        response = testapp.get(self.url + '?name=Fred')
        assert response.json == {'name': 'Fred'}
|
[
"sloria1@gmail.com"
] |
sloria1@gmail.com
|
8e55286d2adba619b99dc413e3201836767bb789
|
a88a99fb3f754649db06ad86d22b5cb0d2d1e19c
|
/scholariumat/users/migrations/0005_auto_20181125_1759.py
|
37e390c09b8e19e06e7d8ed6a47ff1bf93ab1a89
|
[
"MIT"
] |
permissive
|
valuehack/scholariumat
|
91ec59647948759d917ce7077d06b0aa9618c807
|
47c13f3429b95b9ad5ca59b45cf971895260bb5c
|
refs/heads/master
| 2022-12-07T22:20:23.967854
| 2020-04-09T22:05:52
| 2020-04-09T22:05:52
| 135,466,121
| 0
| 3
|
MIT
| 2022-12-06T18:38:22
| 2018-05-30T15:55:14
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 540
|
py
|
# Generated by Django 2.0.9 on 2018-11-25 16:59
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration (Django 2.0.9, 2018-11-25): renames
    ``Profile.name`` to ``last_name`` and adds an optional ``first_name``
    CharField.  Generated code — prefer creating a follow-up migration
    over hand-editing this one."""

    dependencies = [
        ('users', '0004_auto_20181120_1929'),
    ]

    operations = [
        # RenameField preserves existing data under the new column name.
        migrations.RenameField(
            model_name='profile',
            old_name='name',
            new_name='last_name',
        ),
        migrations.AddField(
            model_name='profile',
            name='first_name',
            field=models.CharField(blank=True, max_length=200),
        ),
    ]
|
[
"merlin.buczek@gmail.com"
] |
merlin.buczek@gmail.com
|
a2156bc789a1d722ae16fc02c3016a476e85d470
|
41a4ef26cf3b4710dfa6fe3f1e88a935bb909654
|
/utils/logger.py
|
5a8cecd51025f9a4557d25d6b86232b0ad7b72a8
|
[] |
no_license
|
little-alexandra/attention_ocr
|
c6c0846342f947bbb8697f99e02cdd5ce2c276c2
|
475273573ae02efe1c7c1ba3905939580d26876e
|
refs/heads/master
| 2020-11-30T10:01:52.232714
| 2019-12-18T09:35:08
| 2019-12-18T09:35:08
| 230,371,558
| 1
| 0
| null | 2019-12-27T04:13:46
| 2019-12-27T04:13:45
| null |
UTF-8
|
Python
| false
| false
| 1,463
|
py
|
import logging
import time
import os
from logging import handlers
import datetime
import tensorflow as tf
# Global switch for the tf.Print-based debug helpers (_p, _p_shape) below.
debug=True
def _p(tensor, msg):
    """Attach a value-printing op to *tensor* for graph-time debugging.

    When the module-level ``debug`` flag is set, wraps *tensor* in
    ``tf.Print`` so its values are logged (prefixed with a timestamped
    TF_DEBUG tag) each time it is evaluated; otherwise returns *tensor*
    unchanged.
    """
    if not debug:
        return tensor
    stamped = datetime.datetime.now().strftime('TF_DEBUG: %m-%d %H:%M:%S: ') + msg
    return tf.Print(tensor, [tensor], stamped, summarize=100)
def _p_shape(tensor, msg):
    """Like :func:`_p`, but logs the tensor's *shape* rather than its
    values; a no-op passthrough unless the ``debug`` flag is set."""
    if not debug:
        return tensor
    stamped = datetime.datetime.now().strftime('TF_DEBUG: %m-%d %H:%M:%S: ') + msg
    return tf.Print(tensor, [tf.shape(tensor)], stamped, summarize=100)
def init(level=logging.DEBUG,when="D",backup=7,_format="%(levelname)s: %(asctime)s: %(filename)s:%(lineno)d行 %(message)s"):
    """Configure the root logger once: a timed-rotating file under logs/
    plus a console (stderr) handler, both using *_format*.

    Idempotent: if the root logger already has handlers, none are added
    (the logs/ directory is still created either way).
    """
    started = time.strftime('%Y%m%d%H%M%S', time.localtime(time.time()))
    log_path = 'logs/ocr-attention-' + started + '.log'
    log_dir = os.path.dirname(log_path)
    if not os.path.isdir(log_dir):
        os.makedirs(log_dir)

    root = logging.getLogger()
    if root.handlers:
        return

    formatter = logging.Formatter(_format)
    root.setLevel(level)
    file_handler = handlers.TimedRotatingFileHandler(
        log_path, when=when, backupCount=backup, encoding="utf-8")
    console_handler = logging.StreamHandler()
    for handler in (file_handler, console_handler):
        handler.setLevel(level)
        handler.setFormatter(formatter)
        root.addHandler(handler)
|
[
"piginzoo@gmail.com"
] |
piginzoo@gmail.com
|
9e3769ed23384bf504e6dc9a8a92c51ee8651186
|
d5ad13232e3f1ced55f6956bc4cbda87925c8085
|
/cc_mcc_seq/coverage/coverage_stat/1_coverage_stat.py
|
495a50838e4e7cb74a40295b588b592c6c6f5ef4
|
[] |
no_license
|
arvin580/SIBS
|
c0ba9a8a41f59cb333517c286f7d80300b9501a2
|
0cc2378bf62359ec068336ea4de16d081d0f58a4
|
refs/heads/master
| 2021-01-23T21:57:35.658443
| 2015-04-09T23:11:34
| 2015-04-09T23:11:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 657
|
py
|
# Chromosome labels expected in column 0 of the coverage table.
CHROMOSOMES = ['chr1', 'chr2', 'chr3', 'chr4', 'chr5', 'chr6', 'chr7',
               'chr8', 'chr9', 'chr10', 'chr11', 'chr12', 'chr13', 'chr14',
               'chr15', 'chr16', 'chr17', 'chr18', 'chr19', 'chr20',
               'chr21', 'chr22', 'chrX', 'chrY']

# Split each sample column of ../fudan1.coverage into one file per
# chromosome.  One streaming pass per sample replaces the original's
# re-reading of the whole file for every (sample, chromosome) pair
# (240 full reads -> 10).
for sample_col in range(3, 13):
    per_chrom = {ch: [] for ch in CHROMOSOMES}
    with open('../fudan1.coverage') as in_file:
        for line in in_file:
            fields = line.strip().split('\t')
            # BUG FIX: the original used fields[0].find(ch) != -1, a
            # substring test, so e.g. 'chr1' also matched every 'chr10'..
            # 'chr19' row.  Exact comparison assigns each row to exactly
            # one chromosome.  (Assumes column 0 holds the bare label,
            # e.g. 'chr10' — confirm against the input format.)
            label = fields[0]
            if label in per_chrom:
                per_chrom[label].append(fields[sample_col])
    # Output file names are unchanged: fudan1.coverage.<chrom>.<0..9>
    for ch in CHROMOSOMES:
        out_name = 'fudan1.coverage.' + ch + '.' + str(sample_col - 3)
        with open(out_name, 'w') as out_file:
            for value in per_chrom[ch]:
                out_file.write(value + '\n')
|
[
"sunhanice@gmail.com"
] |
sunhanice@gmail.com
|
2f0c21da46fc0a27a43c211905c51a9b98e78cad
|
c16ea32a4cddb6b63ad3bacce3c6db0259d2bacd
|
/google/ads/googleads/v4/googleads-py/google/ads/googleads/v4/resources/types/campaign_criterion_simulation.py
|
b270fa50fac0d5f5919e4dcac9d75a76b8179a43
|
[
"Apache-2.0"
] |
permissive
|
dizcology/googleapis-gen
|
74a72b655fba2565233e5a289cfaea6dc7b91e1a
|
478f36572d7bcf1dc66038d0e76b9b3fa2abae63
|
refs/heads/master
| 2023-06-04T15:51:18.380826
| 2021-06-16T20:42:38
| 2021-06-16T20:42:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,103
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v4.common.types import simulation
from google.ads.googleads.v4.enums.types import simulation_modification_method
from google.ads.googleads.v4.enums.types import simulation_type
from google.protobuf import wrappers_pb2 # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v4.resources',
marshal='google.ads.googleads.v4',
manifest={
'CampaignCriterionSimulation',
},
)
class CampaignCriterionSimulation(proto.Message):
    r"""A campaign criterion simulation. Supported combinations of
    advertising channel type, criterion ids, simulation type and
    simulation modification method is detailed below respectively.

    1. SEARCH - 30000,30001,30002 - BID_MODIFIER - UNIFORM
    2. SHOPPING - 30000,30001,30002 - BID_MODIFIER - UNIFORM
    3. DISPLAY - 30001 - BID_MODIFIER - UNIFORM

    Attributes:
        resource_name (str):
            Output only. The resource name of the campaign criterion
            simulation. Campaign criterion simulation resource names
            have the form:
            ``customers/{customer_id}/campaignCriterionSimulations/{campaign_id}~{criterion_id}~{type}~{modification_method}~{start_date}~{end_date}``
        campaign_id (google.protobuf.wrappers_pb2.Int64Value):
            Output only. Campaign ID of the simulation.
        criterion_id (google.protobuf.wrappers_pb2.Int64Value):
            Output only. Criterion ID of the simulation.
        type_ (google.ads.googleads.v4.enums.types.SimulationTypeEnum.SimulationType):
            Output only. The field that the simulation
            modifies.
        modification_method (google.ads.googleads.v4.enums.types.SimulationModificationMethodEnum.SimulationModificationMethod):
            Output only. How the simulation modifies the
            field.
        start_date (google.protobuf.wrappers_pb2.StringValue):
            Output only. First day on which the
            simulation is based, in YYYY-MM-DD format.
        end_date (google.protobuf.wrappers_pb2.StringValue):
            Output only. Last day on which the simulation
            is based, in YYYY-MM-DD format.
        bid_modifier_point_list (google.ads.googleads.v4.common.types.BidModifierSimulationPointList):
            Output only. Simulation points if the simulation type is
            BID_MODIFIER.
    """

    # NOTE: generated proto-plus message — field numbers are wire-format
    # identifiers and must not be changed; regenerate rather than hand-edit.
    resource_name = proto.Field(
        proto.STRING,
        number=1,
    )
    campaign_id = proto.Field(
        proto.MESSAGE,
        number=2,
        message=wrappers_pb2.Int64Value,
    )
    criterion_id = proto.Field(
        proto.MESSAGE,
        number=3,
        message=wrappers_pb2.Int64Value,
    )
    type_ = proto.Field(
        proto.ENUM,
        number=4,
        enum=simulation_type.SimulationTypeEnum.SimulationType,
    )
    modification_method = proto.Field(
        proto.ENUM,
        number=5,
        enum=simulation_modification_method.SimulationModificationMethodEnum.SimulationModificationMethod,
    )
    start_date = proto.Field(
        proto.MESSAGE,
        number=6,
        message=wrappers_pb2.StringValue,
    )
    end_date = proto.Field(
        proto.MESSAGE,
        number=7,
        message=wrappers_pb2.StringValue,
    )
    # Member of the 'point_list' oneof; per the docstring above it is only
    # populated when the simulation type is BID_MODIFIER.
    bid_modifier_point_list = proto.Field(
        proto.MESSAGE,
        number=8,
        oneof='point_list',
        message=simulation.BidModifierSimulationPointList,
    )
|
[
"bazel-bot-development[bot]@users.noreply.github.com"
] |
bazel-bot-development[bot]@users.noreply.github.com
|
f7ca419508798f1929999e5cb30894c192fb6861
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/70/usersdata/164/36211/submittedfiles/impedimento.py
|
d16709d5193363df8e0a3b6b23963d9cbe92b2b9
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 253
|
py
|
# -*- coding: utf-8 -*-
import math
#COMECE SEU CÓDIGO AQUI
# Read the three positions from the user.
L = int(input('Digite a posição de L: '))
R = int(input('Digite a posição de R: '))
D = int(input('Digite a posição de D: '))
# 'S' when R is past 50 and strictly greater than both L and D; else 'N'.
print('S' if R > 50 and L < R and R > D else 'N')
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
f1bdea1ca514039345f8ed87d738ee50d65e70be
|
eacfc1c0b2acd991ec2cc7021664d8e79c9e58f6
|
/ccpnmr2.4/python/ccp/format/ansig/sequenceIO.py
|
c11cb240aad4a5e8c13d21ebc8b1168fd7d87865
|
[] |
no_license
|
edbrooksbank/ccpnmr2.4
|
cfecb0896dcf8978d796e6327f7e05a3f233a921
|
f279ca9bb2d972b1ce075dad5fcc16e6f4a9496c
|
refs/heads/master
| 2021-06-30T22:29:44.043951
| 2019-03-20T15:01:09
| 2019-03-20T15:01:09
| 176,757,815
| 0
| 1
| null | 2020-07-24T14:40:26
| 2019-03-20T14:59:23
|
HTML
|
UTF-8
|
Python
| false
| false
| 5,597
|
py
|
"""
======================COPYRIGHT/LICENSE START==========================
sequenceIO.py: I/O for Ansig sequence files
Copyright (C) 2005-2009 Wim Vranken (European Bioinformatics Institute)
=======================================================================
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
A copy of this license can be found in ../../../../license/LGPL.license
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
======================COPYRIGHT/LICENSE END============================
for further information, please contact :
- CCPN website (http://www.ccpn.ac.uk/)
- PDBe website (http://www.ebi.ac.uk/pdbe/)
- contact Wim Vranken (wim@ebi.ac.uk)
=======================================================================
If you are using this software for academic purposes, we suggest
quoting the following references:
===========================REFERENCE START=============================
R. Fogh, J. Ionides, E. Ulrich, W. Boucher, W. Vranken, J.P. Linge, M.
Habeck, W. Rieping, T.N. Bhat, J. Westbrook, K. Henrick, G. Gilliland,
H. Berman, J. Thornton, M. Nilges, J. Markley and E. Laue (2002). The
CCPN project: An interim report on a data model for the NMR community
(Progress report). Nature Struct. Biol. 9, 416-418.
Wim F. Vranken, Wayne Boucher, Tim J. Stevens, Rasmus
H. Fogh, Anne Pajon, Miguel Llinas, Eldon L. Ulrich, John L. Markley, John
Ionides and Ernest D. Laue (2005). The CCPN Data Model for NMR Spectroscopy:
Development of a Software Pipeline. Proteins 59, 687 - 696.
===========================REFERENCE END===============================
"""
import os
# Import general stuff
from memops.universal.Util import returnInt
from ccp.format.ansig.generalIO import AnsigGenericFile
from ccp.format.general.formatIO import Sequence, SequenceElement
#####################
# Class definitions #
#####################
class AnsigSequenceFile(AnsigGenericFile):
    """Reader/writer for Ansig sequence files (Python 2 code).

    A file holds one or more sequences; each ``residue`` line carries a
    sequence code and a 3-letter residue code, optionally followed by a
    ``!``-delimited remark.  Ligands open a new sequence of their own.
    """

    def initialize(self):
        # List of AnsigSequence objects populated by read()/write().
        self.sequences = []

    def read(self, verbose=0):
        """Parse self.name into self.sequences.

        Lines are classified as blank/comment, ``sequence`` headers,
        ``residue`` entries, or errors.  If too many lines fail to parse
        (more than min(5, half the valid lines)) the whole result is
        discarded and the bad lines are printed.
        """
        if verbose == 1:
            print "Reading ansig sequence file %s" % self.name

        self.sequences.append(AnsigSequence())

        lineErrors = []   # raw lines that matched no known record type
        validLines = 0    # number of successfully parsed residue lines

        fin = open(self.name, 'rU')

        # Read first line
        line = fin.readline()

        while line:
            cols = line.split()

            # Blank lines and '!' comment lines are skipped.
            if len(cols) == 0 or self.patt['exclamation'].search(line):
                pass

            elif cols[0] == 'sequence':
                # 'sequence <name>' names the current sequence.
                self.sequences[-1].molName = cols[1]

            elif cols[0] == 'residue':
                # Get remarks on residue (text after '!', if any).
                details = line.split('!')
                if len(details) > 1:
                    details = details[1]
                else:
                    details = None

                if cols[1] == 'lig':
                    # A ligand starts a fresh one-element sequence,
                    # numbered 1 and flagged as ligand.
                    self.sequences.append(AnsigSequence())
                    self.sequences[-1].elements.append(AnsigSequenceElement(1,cols[2],details = details,ligand = True))
                else:
                    self.sequences[-1].elements.append(AnsigSequenceElement(cols[1],cols[2],details = details))

                validLines += 1

            else:
                lineErrors.append(line)

            line = fin.readline()

        fin.close()

        #
        # Check: discard everything if the error rate is too high.
        #
        if len(lineErrors) > min(5, validLines * 0.5):
            self.sequences = []
            print "  Bad %s format lines:%s" % (self.format,self.newline)
            for lineError in lineErrors:
                print lineError

    def write(self, verbose=0):
        """Write self.sequences to self.name in Ansig format.

        Multiple sequences are concatenated into the same file (a
        warning is printed).  Residue 3-letter codes are emitted
        capitalized (e.g. 'Ala'); ligands use the literal tag 'lig'
        instead of a sequence code.
        """
        if verbose == 1:
            print "Writing ansig sequence file %s" % self.name

        if len(self.sequences) > 1:
            print "Warning: multiple sequences - writing to same file."

        fout = open(self.name,'w')

        for sequence in self.sequences:
            #
            # Writing header
            #
            fout.write("! Ansig sequence file" + self.newline)
            fout.write("!" + self.newline)
            fout.write("! written from Ansig sequenceIO in ccpNmr formatConverter suite" + self.newline)
            fout.write("!" + (self.newline * 2))
            fout.write("sequence %s" % sequence.molName + self.newline)

            #
            # Write seqCode + code3Letter (lowercase with first uppercase)
            #
            for residue in sequence.elements:
                resLabel = residue.code3Letter.lower().capitalize()

                if residue.details:
                    addString = " ! %s" % residue.details
                else:
                    addString = ""

                if not residue.ligand:
                    fout.write(" residue  %5d   %3s%s" % (residue.seqCode,resLabel,addString))
                else:
                    fout.write(" residue  %5s   %3s%s" % ('lig',resLabel,addString))

                fout.write(self.newline)

            fout.write("end_sequence" + self.newline)

        fout.close()
# The Ansig sequence container needs no format-specific behaviour.
AnsigSequence = Sequence

class AnsigSequenceElement(SequenceElement):
    """One residue of an Ansig sequence.

    Extends the generic SequenceElement with an optional free-text
    remark (``details``) and a ``ligand`` flag.
    """

    def setFormatSpecific(self, *args, **keywds):
        """Store Ansig-specific keyword data on the element.

        Keywords:
            details -- remark text; stripped of surrounding whitespace,
                       stored as None when absent or None.
            ligand  -- presence of the key marks this element a ligand.
        """
        # dict.has_key() is Python-2-only and removed in Python 3; the
        # `in` operator is equivalent on both.  `is not None` replaces
        # the `!= None` comparison per standard idiom.
        if 'details' in keywds and keywds['details'] is not None:
            self.details = keywds['details'].strip()
        else:
            self.details = None

        # Original only checked key presence, never the value.
        self.ligand = 'ligand' in keywds
|
[
"ejb66@le.ac.uk"
] |
ejb66@le.ac.uk
|
a9dcfe05f4c4e478e4587c722f15febc52961ea1
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_welded.py
|
9d8c71c76ac244c69252e3e58618cb575c8c845f
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 216
|
py
|
#calss header
class _WELDED():
def __init__(self,):
self.name = "WELDED"
self.definitions = weld
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['weld']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
3e436904f0f2dde6f5b4715e4ef0bab9ee10fb76
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_200/43.py
|
066bc53d2e703644911d495cf32ef62e7768e710
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 604
|
py
|
# Time: O((logn)^2)
# Space: O(logn)

def tidy_numbers():
    """Read one integer from stdin and return the largest "tidy" number
    not exceeding it, i.e. the largest number whose decimal digits are
    non-decreasing left to right (Code Jam 2017 qualification, B).

    NOTE: Python 2 code (raw_input / xrange / print statement).
    """
    # Digits of the input number, most significant first.
    digits = map(int, list(raw_input().strip()))
    # Scan right to left; wherever a digit breaks the non-decreasing
    # order, floor the suffix to all 9s and borrow from the prefix.
    for i in reversed(xrange(1, len(digits))):
        if digits[i] == 0 or digits[i] < digits[i-1]:
            # Suffix from position i becomes 9...9.
            for j in xrange(i, len(digits)):
                digits[j] = 9
            # Decrement the nearest non-zero digit to the left; zeros
            # passed on the way wrap to 9 (a chained borrow).
            for j in reversed(xrange(i)):
                if digits[j] != 0:
                    digits[j] -= 1
                    break
                else:
                    digits[j] = 9
    # int() drops any leading zero produced by the borrow (e.g. 100 -> 099 -> 99).
    return int("".join(map(str, digits)))

# One answer line per test case, in Code Jam output format.
for case in xrange(input()):
    print 'Case #%d: %s' % (case+1, tidy_numbers())
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
7e2178bbaea5a74da95f3e91c473d6314be0c123
|
53dd5d2cfb79edc87f6c606bbfb7d0bedcf6da61
|
/.history/EMR/EMRryzd_1_20190410143150.py
|
447c8d147b75b36af2f0e17929c25c3b55f8321c
|
[] |
no_license
|
cyc19950621/python
|
4add54894dc81187211aa8d45e5115903b69a182
|
d184b83e73334a37d413306d3694e14a19580cb0
|
refs/heads/master
| 2020-04-11T20:39:34.641303
| 2019-07-02T12:54:49
| 2019-07-02T12:54:49
| 162,078,640
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,494
|
py
|
#-*- coding: UTF-8 -*-
# Walks every .txt file in the source directory, finds the lines holding
# the admission-diagnosis marker, cleans them up and writes one output
# file per record into the target directory.
import time
import math
import os
import sys
import os, os.path,shutil
import codecs
import EMRdef
import re

emrtxts = EMRdef.txttq(u'D:\DeepLearning ER\EHR')  # collect the .txt file paths from the directory
zljhs = []
for emrtxt in emrtxts:
    f = open(emrtxt,'r',errors="ignore")  # errors="ignore": tolerate undecodable (Chinese) bytes
    emrtxt = os.path.basename(emrtxt)
    emrtxt_str = re.findall(r'(^.+?)\_',emrtxt)  # record ID = file-name prefix before the first "_"
    emrtxt = "".join(emrtxt_str)  # findall returns a list; join back into a str
    pattern = r',|.|,|。|;|;'  # punctuation to strip (NOTE(review): defined but never used below)
    #txtp=txtp.decode('utf-8')
    for line in f.readlines():
        line = re.sub(' ','',line)  # drop all spaces
        if line.find (u'入院诊断:',0,6) >-1:  # only lines starting with the "admission diagnosis:" marker
            line = re.sub(r'h|H', '小时', line)  # spell out "h"/"H" (hours) in Chinese
            line = re.sub(r'入院诊断:', '', line)  # strip the marker itself
            line_deldl = re.split(r';|。|,|;|?',line)  # split on (Chinese) punctuation
            line_deld = '\n'.join(line_deldl)  # back to a single newline-separated str
            line_out = re.sub(r'\d+、','\n',line_deld)  # remove "1、"-style enumeration markers
            line_output = re.split('\n',line_out)
            line = ''.join(line_output)
            EMRdef.text_create(r'D:\DeepLearning ER\EHRryzd','.txt' ,emrtxt,line)  # write the cleaned diagnosis text for this record
    #zljhs.append(emrtxt+':'+line)
#EMRdef.text_save('D:\python\EMR\zljh.txt',zljhs)
|
[
"1044801968@qq.com"
] |
1044801968@qq.com
|
b3091967ca2295a42f480875d29c9828ca79c371
|
ee53b0262007b2f0db0fe15b2ad85f65fafa4e25
|
/Leetcode/594. Longest Harmonious Subsequence.py
|
ce388e0d7f6aef49ca1706ff198232b0568caac0
|
[] |
no_license
|
xiaohuanlin/Algorithms
|
bd48caacb08295fc5756acdac609be78e143a760
|
157cbaeeff74130e5105e58a6b4cdf66403a8a6f
|
refs/heads/master
| 2023-08-09T05:18:06.221485
| 2023-08-08T11:53:15
| 2023-08-08T11:53:15
| 131,491,056
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,487
|
py
|
'''
We define a harmonious array is an array where the difference between its maximum value and its minimum value is exactly 1.
Now, given an integer array, you need to find the length of its longest harmonious subsequence among all its possible subsequences.
Example 1:
Input: [1,3,2,2,5,2,3,7]
Output: 5
Explanation: The longest harmonious subsequence is [3,2,2,2,3].
Note: The length of the input array will not exceed 20,000.
'''
import unittest
class Solution:
    def findLHS(self, nums):
        """Return the length of the longest harmonious subsequence of
        `nums` — one whose max and min differ by exactly 1 — or 0 when
        no such subsequence exists (including for empty input).

        :type nums: List[int]
        :rtype: int

        For each distinct value v that also has v+1 present, every
        occurrence of v and of v+1 can be taken, so the answer is the
        best count[v] + count[v+1].  This is O(n) time / O(n) space,
        replacing the original sort-based O(n log n) scan with
        identical results.
        """
        from collections import Counter
        counts = Counter(nums)
        return max(
            (counts[v] + counts[v + 1] for v in counts if v + 1 in counts),
            default=0,
        )
class TestSolution(unittest.TestCase):
    """Unit tests for Solution.findLHS."""

    def test_case(self):
        # (input list, expected longest harmonious subsequence length)
        cases = (
            ([1, 3, 2, 2, 5, 2, 3, 7], 5),
            ([], 0),
            ([0, 1], 2),
        )
        for nums, expected in cases:
            self.assert_function(nums, expected)

    def assert_function(self, first, second):
        actual = Solution().findLHS(first)
        self.assertEqual(actual, second,
                         msg="first: {}; second: {}".format(first, second))
unittest.main()
|
[
"derek.xiao@loftk.us"
] |
derek.xiao@loftk.us
|
f56ec2e37f5245dcb7f44bca1b5244795ec53b15
|
3185dc605853fdaf942fd06e206225793b198638
|
/剑指offer/No33_二叉搜索树的后序遍历.py
|
3f91cd02bac6390558ce1f02fee1723e7f138edf
|
[] |
no_license
|
fank-cd/books_exercise_code
|
cb81ee8ec8167a5f5e3bfc58d3c1d6d931ca9286
|
1e8109adb82f741df1203658d4bf272f09a651b8
|
refs/heads/master
| 2021-07-11T01:15:11.980179
| 2020-06-29T04:01:53
| 2020-06-29T04:01:53
| 156,671,075
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,113
|
py
|
def verify_sequence_of_BST(sequence):
    """Return True if `sequence` could be the post-order traversal of a
    binary search tree, False otherwise.

    All values are assumed pairwise distinct.  Non-list input and the
    empty list yield False.

    In a post-order walk the root is the last element.  Everything
    before the first value greater than the root must be the left
    subtree (all smaller); the remainder up to the root must be the
    right subtree (all larger).  Both halves are checked recursively.
    """
    if not isinstance(sequence, list) or len(sequence) == 0:
        return False

    root = sequence[-1]

    # Locate where the right subtree starts: the first value above root.
    # If no value exceeds the root, split lands on the root's own index.
    split = len(sequence) - 1
    for pos, value in enumerate(sequence):
        split = pos
        if value > root:
            break

    # Every value in the right-subtree slice must exceed the root.
    if any(value < root for value in sequence[split:]):
        return False

    # Validate both halves; an empty half is trivially valid.
    left_ok = split == 0 or verify_sequence_of_BST(sequence[:split])
    right_ok = split >= len(sequence) - 1 or verify_sequence_of_BST(sequence[split:-1])
    return left_ok and right_ok
if __name__ == "__main__":
    # Smoke tests: the first is a valid BST post-order, the second is not.
    print(verify_sequence_of_BST([5, 7, 6, 9, 11, 10, 8]))
    print(verify_sequence_of_BST([7, 4, 6, 5]))
|
[
"2464512446@qq.com"
] |
2464512446@qq.com
|
7b44dc1ca5547e96f889aa391f271ffc1af3a36b
|
f77b0f2cc709b9670e6b4dc7145a6ea5368585d2
|
/project/services/__init__.py
|
db39b8c13a29c94642f59828bc1ba56e2a186ac3
|
[] |
no_license
|
sgammon/StonerHub
|
45ccac6bd349200bbc75c494002c3ffeb082dcb8
|
a81f7fdd2c7118c6cea3c25ef9f53f272d27b0cc
|
refs/heads/master
| 2021-01-20T10:54:47.546251
| 2011-11-07T12:02:20
| 2011-11-07T12:02:20
| 2,664,437
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 209
|
py
|
# -*- coding: utf-8 -*-
from apptools import BaseService
import protorpc
from protorpc import remote
from protorpc import messages
from protorpc import message_types
class RemoteService(BaseService):
    """ProtoRPC remote service stub; no RPC methods are exposed yet."""
    pass
|
[
"sgammon@bluestatedigital.com"
] |
sgammon@bluestatedigital.com
|
3081ad286ed7ba0b6f961edb970e6c68a711da17
|
43c5eafff817c3b130d8b4ec16c992d86208500c
|
/lino_avanti/lib/tickets/models.py
|
d831bc25bd9a0efddf3a6883c015383e6dfc9eaa
|
[
"BSD-2-Clause"
] |
permissive
|
khchine5/avanti
|
a8b9e9e75ac593a00b11ffa5bd5cfd00f06db6c0
|
5a5f9d1ddfa20ae0eb8fa33cb906daf78d9568b1
|
refs/heads/master
| 2021-01-11T17:29:16.012089
| 2018-08-23T05:46:52
| 2018-08-23T05:46:52
| 79,784,948
| 0
| 0
|
BSD-2-Clause
| 2018-08-19T12:28:12
| 2017-01-23T08:28:46
|
Python
|
UTF-8
|
Python
| false
| false
| 1,287
|
py
|
# -*- coding: UTF-8 -*-
# Copyright 2016-2017 Luc Saffre
# License: BSD (see file COPYING for details)
# Lino Avanti overrides for the lino_xl tickets plugin models.

from lino_xl.lib.tickets.models import *
from lino.api import _

# Avanti never uses the 'closed' field; remove it from all layouts.
Ticket.hide_elements('closed')

# class Ticket(Ticket):
#     class Meta(Ticket.Meta):
#         app_label = 'tickets'
#         abstract = dd.is_abstract_model(__name__, 'Ticket')

#     client = dd.ForeignKey('avanti.Client', blank=True, null=True)

# Relabel the field: "upgrade notes" are shown to users as "Solution".
dd.update_field(
    'tickets.Ticket', 'upgrade_notes', verbose_name=_("Solution"))

# dd.update_field(
#     'tickets.Ticket', 'state', default=TicketStates.todo.as_callable)

class TicketDetail(TicketDetail):
    """Customized ticket detail layout: two tabs (General / More); the
    history tab is disabled (commented out) for now."""
    main = "general #history_tab more"

    general = dd.Panel("""
    summary:40 end_user
    id:6 user:12
    workflow_buttons
    description
    """, label=_("General"))

    # history_tab = dd.Panel("""
    # changes.ChangesByMaster:50 #stars.StarsByController:20
    # """, label=_("History"), required_roles=dd.login_required(Triager))

    # "More" tab is restricted to triagers.
    more = dd.Panel("""
    more1:60
    upgrade_notes:20
    """, label=_("More"), required_roles=dd.login_required(Triager))

    more1 = """
    created modified ticket_type:10
    deadline site
    state priority project
    # standby feedback closed
    """

# Install the customized layout on the Tickets table.
Tickets.detail_layout = TicketDetail()
|
[
"luc.saffre@gmail.com"
] |
luc.saffre@gmail.com
|
020682c50ddf142dd210fcb7c0c287518d456cfd
|
f62fd455e593a7ad203a5c268e23129473d968b6
|
/python-watcher-1.0.1/watcher/api/controllers/v1/__init__.py
|
16279551e21185f3c32f42077fb0cddb05a9009b
|
[
"Apache-2.0",
"CC-BY-3.0"
] |
permissive
|
MinbinGong/OpenStack-Ocata
|
5d17bcd47a46d48ff9e71e2055f667836174242f
|
8b7650128cfd2fdf5d6c8bc4613ac2e396fb2fb3
|
refs/heads/master
| 2021-06-23T05:24:37.799927
| 2017-08-14T04:33:05
| 2017-08-14T04:33:05
| 99,709,985
| 0
| 2
| null | 2020-07-22T22:06:22
| 2017-08-08T15:48:44
|
Python
|
UTF-8
|
Python
| false
| false
| 7,004
|
py
|
# -*- encoding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Version 1 of the Watcher API
NOTE: IN PROGRESS AND NOT FULLY IMPLEMENTED.
"""
import datetime
import pecan
from pecan import rest
import wsme
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan
from watcher.api.controllers import link
from watcher.api.controllers.v1 import action
from watcher.api.controllers.v1 import action_plan
from watcher.api.controllers.v1 import audit
from watcher.api.controllers.v1 import audit_template
from watcher.api.controllers.v1 import goal
from watcher.api.controllers.v1 import scoring_engine
from watcher.api.controllers.v1 import service
from watcher.api.controllers.v1 import strategy
class APIBase(wtypes.Base):
    """Common base for v1 REST representations: audit timestamps plus
    helpers for serializing to a dict and trimming response fields."""

    created_at = wsme.wsattr(datetime.datetime, readonly=True)
    """The time in UTC at which the object is created"""

    updated_at = wsme.wsattr(datetime.datetime, readonly=True)
    """The time in UTC at which the object is updated"""

    deleted_at = wsme.wsattr(datetime.datetime, readonly=True)
    """The time in UTC at which the object is deleted"""

    def as_dict(self):
        """Render this object as a dict of its fields."""
        return {
            name: getattr(self, name)
            for name in self.fields
            if hasattr(self, name) and getattr(self, name) != wsme.Unset
        }

    def unset_fields_except(self, except_list=None):
        """Unset fields so they don't appear in the message body.

        :param except_list: A list of fields that won't be touched.
        """
        keep = except_list if except_list is not None else []
        for name in self.as_dict():
            if name not in keep:
                setattr(self, name, wsme.Unset)
class MediaType(APIBase):
    """A media type representation."""

    base = wtypes.text
    type = wtypes.text

    def __init__(self, base, type):
        """Record the base content type and the vendor-specific type."""
        self.type = type
        self.base = base
class V1(APIBase):
    """The representation of the version 1 of the API."""

    id = wtypes.text
    """The ID of the version, also acts as the release number"""

    media_types = [MediaType]
    """An array of supported media types for this version"""

    audit_templates = [link.Link]
    """Links to the audit templates resource"""

    audits = [link.Link]
    """Links to the audits resource"""

    actions = [link.Link]
    """Links to the actions resource"""

    action_plans = [link.Link]
    """Links to the action plans resource"""

    scoring_engines = [link.Link]
    """Links to the Scoring Engines resource"""

    services = [link.Link]
    """Links to the services resource"""

    links = [link.Link]
    """Links that point to a specific URL for this version and documentation"""

    @staticmethod
    def convert():
        # Build the version document per request so that every link is
        # based on the incoming request's host URL (see Controller.get).
        v1 = V1()
        v1.id = "v1"
        v1.links = [link.Link.make_link('self', pecan.request.host_url,
                                        'v1', '', bookmark=True),
                    link.Link.make_link('describedby',
                                        'http://docs.openstack.org',
                                        'developer/watcher/dev',
                                        'api-spec-v1.html',
                                        bookmark=True, type='text/html')
                    ]
        v1.media_types = [MediaType('application/json',
                          'application/vnd.openstack.watcher.v1+json')]
        # Each resource gets a 'self' link and a shortened 'bookmark' link.
        v1.audit_templates = [link.Link.make_link('self',
                                                  pecan.request.host_url,
                                                  'audit_templates', ''),
                              link.Link.make_link('bookmark',
                                                  pecan.request.host_url,
                                                  'audit_templates', '',
                                                  bookmark=True)
                              ]
        v1.audits = [link.Link.make_link('self', pecan.request.host_url,
                                         'audits', ''),
                     link.Link.make_link('bookmark',
                                         pecan.request.host_url,
                                         'audits', '',
                                         bookmark=True)
                     ]
        v1.actions = [link.Link.make_link('self', pecan.request.host_url,
                                          'actions', ''),
                      link.Link.make_link('bookmark',
                                          pecan.request.host_url,
                                          'actions', '',
                                          bookmark=True)
                      ]
        v1.action_plans = [link.Link.make_link(
            'self', pecan.request.host_url, 'action_plans', ''),
            link.Link.make_link('bookmark',
                                pecan.request.host_url,
                                'action_plans', '',
                                bookmark=True)
            ]
        v1.scoring_engines = [link.Link.make_link(
            'self', pecan.request.host_url, 'scoring_engines', ''),
            link.Link.make_link('bookmark',
                                pecan.request.host_url,
                                'scoring_engines', '',
                                bookmark=True)
            ]
        v1.services = [link.Link.make_link(
            'self', pecan.request.host_url, 'services', ''),
            link.Link.make_link('bookmark',
                                pecan.request.host_url,
                                'services', '',
                                bookmark=True)
            ]
        return v1
class Controller(rest.RestController):
    """Version 1 API controller root.

    Each attribute mounts a sub-controller under /v1/<name>; GET on the
    root returns the version document with discovery links.
    """

    audits = audit.AuditsController()
    audit_templates = audit_template.AuditTemplatesController()
    actions = action.ActionsController()
    action_plans = action_plan.ActionPlansController()
    goals = goal.GoalsController()
    scoring_engines = scoring_engine.ScoringEngineController()
    services = service.ServicesController()
    strategies = strategy.StrategiesController()

    @wsme_pecan.wsexpose(V1)
    def get(self):
        # NOTE: The reason why convert() it's being called for every
        # request is because we need to get the host url from
        # the request object to make the links.
        return V1.convert()

__all__ = ("Controller", )
|
[
"gongwayne@hotmail.com"
] |
gongwayne@hotmail.com
|
bc9c483d2db8d0a17d8feb2d6bf122ae4ee9d04a
|
3e2addcbdbccf688abfe3c653cc9bc8a57255fbb
|
/subword/get_vocab.py
|
7b71bf7f0609c1cf9f676db9398ec067465c9e32
|
[
"MIT"
] |
permissive
|
Germey/TextSummarization
|
3ffb01401051642c579d7ce6b169a41fca4b48d2
|
ffe221eea45838fbc2e12cd30e51fa48cf2ed070
|
refs/heads/master
| 2021-04-06T15:00:20.288387
| 2020-01-29T03:33:53
| 2020-01-29T03:33:53
| 124,391,071
| 1
| 1
| null | 2020-09-25T18:50:12
| 2018-03-08T12:52:25
|
Python
|
UTF-8
|
Python
| false
| false
| 320
|
py
|
#! /usr/bin/env python
from __future__ import print_function
# Read whitespace-tokenized text on stdin and print "token count" lines
# in descending frequency order (a simple vocabulary builder).

import sys
from collections import Counter

c = Counter()
for line in sys.stdin:
    # split(' ') can yield empty strings for repeated spaces; skip them.
    for word in line.strip().split(' '):
        if word:
            c[word] += 1

# Counter.most_common() sorts by count descending (stable for ties),
# exactly matching the hand-rolled sorted(..., key=count, reverse=True)
# it replaces.
for key, f in c.most_common():
    print(key + " " + str(f))
|
[
"cqc@cuiqingcai.com"
] |
cqc@cuiqingcai.com
|
75e25c09247507829e56bc9b21b111cf87fc5c63
|
6fe2d3c27c4cb498b7ad6d9411cc8fa69f4a38f8
|
/algorithms/algorithms-python/leetcode/Question_0050_Pow_x_n.py
|
e16603e02ca49194c4b65741a35009f0423d53c7
|
[] |
no_license
|
Lanceolata/code
|
aae54af632a212c878ce45b11dab919bba55bcb3
|
f7d5a7de27c3cc8a7a4abf63eab9ff9b21d512fb
|
refs/heads/master
| 2022-09-01T04:26:56.190829
| 2021-07-29T05:14:40
| 2021-07-29T05:14:40
| 87,202,214
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 467
|
py
|
#!/usr/bin/python
# coding: utf-8
class Solution(object):
    def myPow(self, x, n):
        """Compute x**n by recursive binary exponentiation.

        :type x: float
        :type n: int
        :rtype: float

        Negative exponents are rewritten as 1/x * 1/x**(-(n+1)); the
        (n + 1) keeps -n representable even for the minimum 32-bit int.
        """
        if n < 0:
            return 1 / x * 1 / self.myPow(x, -(n + 1))
        if n == 0:
            return 1.0
        if n == 1:
            return float(x)
        # Square the half power, then multiply one extra x for odd n.
        partial = self.myPow(x, n >> 1)
        result = partial * partial
        if n & 1:
            result *= x
        return result
|
[
"lanceolatayuan@gmail.com"
] |
lanceolatayuan@gmail.com
|
5773aa56ca5ed8607d8fc9025b01c63b5333b19c
|
199115509359d0849d9debb12db4eced6081dbed
|
/emotion_chatbot/speechtotext.py
|
6394875fcb7f550ba787d59f0aea71ef9c1de8ee
|
[] |
no_license
|
Sapphirine/202005-1-EmotionRecognitionsystem
|
1ea1df3ddcd43bf5b25e76b534c75778a8d8b921
|
ca4efb89ed762f16202476d39314051cf12a1b7e
|
refs/heads/master
| 2022-08-29T20:00:21.097719
| 2020-05-16T05:05:11
| 2020-05-16T05:05:11
| 264,354,185
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 808
|
py
|
import speech_recognition as sr
import pyaudio
import audioop
import os
import math
from os import system
import threading
def transcribe_audio():
    """Record ~3 seconds from the default microphone and return the top
    Google Speech transcript as a string, or None when nothing usable
    was recognized.

    Prints "recording ..." while capturing and a prompt when the
    recognizer returned no transcript.
    """
    with sr.Microphone() as source:
        # read the audio data from the default microphone
        r = sr.Recognizer()
        #print("Chatbot : Hey! How can I help?")
        print("recording ...")
        audio_data = r.record(source, duration=3)
        # convert speech to text; show_all=True returns the raw result
        # (a dict of candidates, or an empty list when nothing matched)
        # instead of raising on failure.
        text = r.recognize_google(audio_data, show_all=True)
        try:
            text = text['alternative'][0]['transcript']
            #print(text)
            return text
        except (LookupError, TypeError):
            # LookupError: missing 'alternative' key / empty candidate
            # list; TypeError: result was [] rather than a dict.  The
            # original bare `except:` also swallowed SystemExit and
            # KeyboardInterrupt, hiding real errors.
            print("can't hear you, speak up")
        return None
# with open("recorded.wav", "wb") as f:
# f.write(audio_data.get_wav_data())
|
[
"noreply@github.com"
] |
Sapphirine.noreply@github.com
|
e90d3914edcfc23c1e31112856f99c98b766538b
|
a450d455fc1da6f3a89eebb562cc2fb28784b129
|
/docs/conf.py
|
36714ac69f1de680f841bad04edc215e5a835504
|
[
"MIT"
] |
permissive
|
hawkthorne/bearweb
|
6c62e0143ab6a19bee6cf340dfec81664f201dcb
|
1533acd9c7610d9ea01e8413853cca70843b9d63
|
refs/heads/master
| 2021-05-28T05:54:55.533462
| 2014-03-16T23:12:01
| 2014-03-16T23:12:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,778
|
py
|
# -*- coding: utf-8 -*-
#
# stackmachine documentation build configuration file, created by
# sphinx-quickstart on Sun Feb 17 11:46:20 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

import sys, os

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))

# -- General configuration -----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
# NOTE(review): "ChangeMyName" / "ChangeToMyName" below are unfilled
# sphinx-quickstart placeholders — replace before publishing the docs.
project = u'stackmachine'
copyright = u'2013, ChangeMyName'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'stackmachinedoc'

# -- Options for LaTeX output --------------------------------------------------

latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',

# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',

# Additional stuff for the LaTeX preamble.
#'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('index', 'stackmachine.tex', u'stackmachine Documentation',
   u'ChangeToMyName', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True

# -- Options for manual page output --------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'stackmachine', u'stackmachine Documentation',
     [u'ChangeToMyName'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False

# -- Options for Texinfo output ------------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
  ('index', 'stackmachine', u'stackmachine Documentation',
   u'ChangeToMyName', 'stackmachine', 'One line description of project.',
   'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
|
[
"kyle@kyleconroy.com"
] |
kyle@kyleconroy.com
|
dfdafec9f08b465c99582efafef3c5f0429c748a
|
70c10c5eeeb038d87841289f109775a9ec5ddc9b
|
/Day8/thread_lock.py
|
ab3ea522b2969168d19431f244cdd1e35f9d5c6c
|
[] |
no_license
|
chenshuo666/pythonWell
|
a68fdae5fab891dd235c7656892ac85eaf9163c5
|
ae4cd795c5cf925830b5f7df8c7668bec5b2904c
|
refs/heads/master
| 2022-12-09T21:35:00.343591
| 2018-09-10T06:04:45
| 2018-09-10T06:04:45
| 130,060,002
| 1
| 0
| null | 2022-12-08T00:44:33
| 2018-04-18T12:38:02
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 431
|
py
|
#!/usr/bin/python
#-*- coding:utf-8 -*-
# Author:Sebastian Williams
# Demo: 50 daemon threads each increment a shared counter under an RLock.
import time
import threading

lock = threading.RLock()
num = 0

def run(n):
    """Increment the shared counter once while holding the lock.

    :param n: the thread's name tag (unused; kept so existing callers
              that pass it keep working).
    """
    global num
    # `with` guarantees the lock is released even if the body raises --
    # the manual acquire()/release() pair it replaces would leak the
    # lock on an exception.
    with lock:
        num += 1

t_obj = []
for i in range(50):
    t = threading.Thread(target=run, args=("t-%s" % i,))
    # Mark as daemon so it never blocks interpreter exit.  Assigning
    # `.daemon` replaces the deprecated setDaemon() call.
    t.daemon = True
    t.start()
    t_obj.append(t)
# Wait for every worker before reading the counter.
for t in t_obj:
    t.join()
print("num:", num)
|
[
"929387146@qq.com"
] |
929387146@qq.com
|
e8f740e26272bafd38b053bb5e8e1737f5b16837
|
d62863d049c0206bfa744ca4c9e886030bfce1ab
|
/apps/sw_shop/sw_order/views.py
|
2eb7cbc57083cb103a5b7c6b5f867553d8a37e5f
|
[] |
no_license
|
jurgeon018/box
|
51738b99e640202936ed72357d3c67d2517e589b
|
50b84a0afa73fab85a00eef54194f3c126d15397
|
refs/heads/master
| 2021-07-17T13:37:08.665292
| 2020-10-15T09:50:33
| 2020-10-15T09:50:33
| 232,013,297
| 0
| 1
| null | 2020-03-27T02:16:44
| 2020-01-06T03:01:34
|
Python
|
UTF-8
|
Python
| false
| false
| 937
|
py
|
from django.shortcuts import render, redirect
from .models import Payment, Order
from box.core.sw_currency.models import Currency
from django.http import JsonResponse
def liqpay_callback(request):
    """Handle LiqPay's server-to-server payment callback.

    Records the transaction, creates a Payment for the matching Order,
    finalizes the order and redirects to the thank-you page.
    """
    try:
        from box.apps.sw_payment.liqpay.utils import create_liqpay_transaction
    except (ImportError, RuntimeError):
        # Fallback for deployments where the liqpay app is installed as
        # the standalone `sw_liqpay` package.  Narrowed from a bare
        # `except:`, which also hid unrelated failures (including
        # SystemExit/KeyboardInterrupt); RuntimeError covers Django's
        # "apps aren't loaded yet" case hinted at by the old comment.
        from sw_liqpay.utils import create_liqpay_transaction
    # NOTE(review): debug prints kept to preserve behavior; consider
    # switching to the logging module.
    print('order liqpay_callback')
    form = create_liqpay_transaction(request)
    transaction = form.instance
    print("transaction:", transaction)
    print("transaction.order_id:", transaction.order_id)
    order = Order.objects.get(id=transaction.order_id)
    payment = Payment.objects.create(
        order=order,
        amount=transaction.amount,
        currency=Currency.objects.get(code=transaction.currency)
    )
    order.make_order(request)
    return redirect('thank_you')
|
[
"jurgeon018@gmail.com"
] |
jurgeon018@gmail.com
|
4609643ace645f4f97139036b9c90ce0a8d8ba63
|
a073e730e32b75f5d4e02f6bcf8defcaa9c5da97
|
/backend/fashion_candy_18064/settings.py
|
bba67dbee4f4084134baf664a6af612e2792240d
|
[] |
no_license
|
crowdbotics-apps/fashion-candy-18064
|
a962d38c9cbc27a90428dda3f962edbb32b5e427
|
87b8ef7ce8c9e29c8b543cfc5b0a0af8191f52a8
|
refs/heads/master
| 2022-10-21T06:00:25.083275
| 2020-06-13T01:15:59
| 2020-06-13T01:15:59
| 271,917,534
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,884
|
py
|
"""
Django settings for fashion_candy_18064 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"django.contrib.sites",
"task",
"task_profile",
"tasker_business",
"location",
"wallet",
"task_category",
"chat",
"chat_user_profile",
]
LOCAL_APPS = [
"home",
"users.apps.UsersConfig",
]
THIRD_PARTY_APPS = [
"rest_framework",
"rest_framework.authtoken",
"rest_auth",
"rest_auth.registration",
"bootstrap4",
"allauth",
"allauth.account",
"allauth.socialaccount",
"allauth.socialaccount.providers.google",
"django_extensions",
"drf_yasg",
# start fcm_django push notifications
"fcm_django",
# end fcm_django push notifications
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "fashion_candy_18064.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "fashion_candy_18064.wsgi.application"
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": os.path.join(BASE_DIR, "db.sqlite3"),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {"default": env.db()}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = "/static/"
MIDDLEWARE += ["whitenoise.middleware.WhiteNoiseMiddleware"]
AUTHENTICATION_BACKENDS = (
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, "static")]
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = "email"
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "mandatory"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# start fcm_django push notifications
FCM_DJANGO_SETTINGS = {"FCM_SERVER_KEY": env.str("FCM_SERVER_KEY", "")}
# end fcm_django push notifications
if DEBUG:
# output email to console instead of sending
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
7b285f649b01ce696d22c4fbd5cb8dae8b8d1286
|
e63a36870512edb7fd947b809631cf153b028997
|
/doc/source/conf.py
|
eed13ab7549cb2d023a203b106150682f25abb0b
|
[
"Apache-2.0"
] |
permissive
|
titilambert/surveil
|
632c7e65d10e03c675d78f278822015346f5c47a
|
8feeb64e40ca2bd95ebd60506074192ecdf627b6
|
refs/heads/master
| 2020-05-25T13:36:59.708227
| 2015-06-29T14:07:07
| 2015-06-29T14:07:07
| 38,249,530
| 1
| 0
| null | 2015-06-29T13:38:04
| 2015-06-29T13:38:03
| null |
UTF-8
|
Python
| false
| false
| 2,531
|
py
|
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinxcontrib.autohttp.flask',
'sphinxcontrib.pecanwsme.rest',
# 'oslosphinx',
'wsmeext.sphinxext',
]
wsme_protocols = ['restjson', 'restxml']
# autodoc generation is a bit aggressive and a nuisance when doing heavy
# text edit cycles.
# execute "export SPHINX_DEBUG=1" in your terminal to disable
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'surveil'
copyright = u'2014-2015, Surveil Contributors'
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Output file base name for HTML help builder.
htmlhelp_basename = '%sdoc' % project
# -- Options for manual page output -------------------------------------------
# If true, show URL addresses after external links.
man_show_urls = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
|
[
"alexandre.viau@savoirfairelinux.com"
] |
alexandre.viau@savoirfairelinux.com
|
d675945d9dcc0f99396a02cefb496fc58d518c2b
|
edb906c10790abc1eba4047bca557aa173616f10
|
/business/serializer.py
|
9c6d69a222cb016743cecfb85d7e3c1b365aab12
|
[] |
no_license
|
Wakarende/neighbourhood
|
743d26ee76a79018865a15c523f390c35812b73c
|
29003acc8f760046a33f1b3313b5a016a007890d
|
refs/heads/master
| 2023-05-13T12:43:53.257053
| 2021-06-08T06:59:09
| 2021-06-08T06:59:09
| 373,812,884
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 219
|
py
|
from rest_framework import serializers
from .models import BusinessModel
from django.db import models
class BusinessSerializer(serializers.ModelSerializer):
class Meta:
model=BusinessModel
fields='__all__'
|
[
"joykirii@gmail.com"
] |
joykirii@gmail.com
|
54916cd6aef8b96949a3900348ef5e689648aa2c
|
1ed4e96c20da03fbd3aa4f18d4b004a59d8f89e5
|
/Repo/venv/Lib/site-packages/torch/utils/file_baton.py
|
d474bfb4a810ea042d978407b1239dea9dd3f8b9
|
[] |
no_license
|
donhatkha/CS2225.CH1501
|
eebc854864dc6fe72a3650f640787de11d4e82b7
|
19d4dd3b11f8c9560d0d0a93882298637cacdc80
|
refs/heads/master
| 2023-07-19T13:27:17.862158
| 2021-02-08T07:19:05
| 2021-02-08T07:19:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,625
|
py
|
import os
import sys
import time
if sys.version < '3.3':
# Note(jiayq): in Python 2, FileExistsError is not defined and the
# error manifests it as OSError.
FileExistsError = OSError
class FileBaton:
'''A primitive, file-based synchronization utility.'''
def __init__(self, lock_file_path, wait_seconds=0.1):
'''
Creates a new :class:`FileBaton`.
Args:
lock_file_path: The path to the file used for locking.
wait_seconds: The seconds to periorically sleep (spin) when
calling ``wait()``.
'''
self.lock_file_path = lock_file_path
self.wait_seconds = wait_seconds
self.fd = None
def try_acquire(self):
'''
Tries to atomically create a file under exclusive access.
Returns:
True if the file could be created, else False.
'''
try:
self.fd = os.open(self.lock_file_path, os.O_CREAT | os.O_EXCL)
return True
except FileExistsError:
return False
def wait(self):
'''
Periodically sleeps for a certain amount until the baton is released.
The amount of time slept depends on the ``wait_seconds`` parameter
passed to the constructor.
'''
while os.path.exists(self.lock_file_path):
time.sleep(self.wait_seconds)
def release(self):
'''Releases the baton and removes its file.'''
if self.fd is not None:
os.close(self.fd)
os.remove(self.lock_file_path)
|
[
"59596379+khado2359@users.noreply.github.com"
] |
59596379+khado2359@users.noreply.github.com
|
894489a6d159e040d5ca697e4bb1fadf471b887c
|
1dacbf90eeb384455ab84a8cf63d16e2c9680a90
|
/lib/python2.7/site-packages/_pytest/recwarn.py
|
753bfd18742651b338e79169aab68ed417785218
|
[
"Python-2.0",
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-unknown"
] |
permissive
|
wangyum/Anaconda
|
ac7229b21815dd92b0bd1c8b7ec4e85c013b8994
|
2c9002f16bb5c265e0d14f4a2314c86eeaa35cb6
|
refs/heads/master
| 2022-10-21T15:14:23.464126
| 2022-10-05T12:10:31
| 2022-10-05T12:10:31
| 76,526,728
| 11
| 10
|
Apache-2.0
| 2022-10-05T12:10:32
| 2016-12-15T05:26:12
|
Python
|
UTF-8
|
Python
| false
| false
| 7,173
|
py
|
""" recording warnings during test function execution. """
import inspect
import py
import sys
import warnings
import pytest
@pytest.yield_fixture
def recwarn(request):
"""Return a WarningsRecorder instance that provides these methods:
* ``pop(category=None)``: return last warning matching the category.
* ``clear()``: clear list of warnings
See http://docs.python.org/library/warnings.html for information
on warning categories.
"""
wrec = WarningsRecorder()
with wrec:
warnings.simplefilter('default')
yield wrec
def pytest_namespace():
return {'deprecated_call': deprecated_call,
'warns': warns}
def deprecated_call(func, *args, **kwargs):
""" assert that calling ``func(*args, **kwargs)`` triggers a
``DeprecationWarning`` or ``PendingDeprecationWarning``.
Note: we cannot use WarningsRecorder here because it is still subject
to the mechanism that prevents warnings of the same type from being
triggered twice for the same module. See #1190.
"""
categories = []
def warn_explicit(message, category, *args, **kwargs):
categories.append(category)
old_warn_explicit(message, category, *args, **kwargs)
def warn(message, category=None, *args, **kwargs):
if isinstance(message, Warning):
categories.append(message.__class__)
else:
categories.append(category)
old_warn(message, category, *args, **kwargs)
old_warn = warnings.warn
old_warn_explicit = warnings.warn_explicit
warnings.warn_explicit = warn_explicit
warnings.warn = warn
try:
ret = func(*args, **kwargs)
finally:
warnings.warn_explicit = old_warn_explicit
warnings.warn = old_warn
deprecation_categories = (DeprecationWarning, PendingDeprecationWarning)
if not any(issubclass(c, deprecation_categories) for c in categories):
__tracebackhide__ = True
raise AssertionError("%r did not produce DeprecationWarning" % (func,))
return ret
def warns(expected_warning, *args, **kwargs):
"""Assert that code raises a particular class of warning.
Specifically, the input @expected_warning can be a warning class or
tuple of warning classes, and the code must return that warning
(if a single class) or one of those warnings (if a tuple).
This helper produces a list of ``warnings.WarningMessage`` objects,
one for each warning raised.
This function can be used as a context manager, or any of the other ways
``pytest.raises`` can be used::
>>> with warns(RuntimeWarning):
... warnings.warn("my warning", RuntimeWarning)
"""
wcheck = WarningsChecker(expected_warning)
if not args:
return wcheck
elif isinstance(args[0], str):
code, = args
assert isinstance(code, str)
frame = sys._getframe(1)
loc = frame.f_locals.copy()
loc.update(kwargs)
with wcheck:
code = py.code.Source(code).compile()
py.builtin.exec_(code, frame.f_globals, loc)
else:
func = args[0]
with wcheck:
return func(*args[1:], **kwargs)
class RecordedWarning(object):
def __init__(self, message, category, filename, lineno, file, line):
self.message = message
self.category = category
self.filename = filename
self.lineno = lineno
self.file = file
self.line = line
class WarningsRecorder(object):
"""A context manager to record raised warnings.
Adapted from `warnings.catch_warnings`.
"""
def __init__(self, module=None):
self._module = sys.modules['warnings'] if module is None else module
self._entered = False
self._list = []
@property
def list(self):
"""The list of recorded warnings."""
return self._list
def __getitem__(self, i):
"""Get a recorded warning by index."""
return self._list[i]
def __iter__(self):
"""Iterate through the recorded warnings."""
return iter(self._list)
def __len__(self):
"""The number of recorded warnings."""
return len(self._list)
def pop(self, cls=Warning):
"""Pop the first recorded warning, raise exception if not exists."""
for i, w in enumerate(self._list):
if issubclass(w.category, cls):
return self._list.pop(i)
__tracebackhide__ = True
raise AssertionError("%r not found in warning list" % cls)
def clear(self):
"""Clear the list of recorded warnings."""
self._list[:] = []
def __enter__(self):
if self._entered:
__tracebackhide__ = True
raise RuntimeError("Cannot enter %r twice" % self)
self._entered = True
self._filters = self._module.filters
self._module.filters = self._filters[:]
self._showwarning = self._module.showwarning
def showwarning(message, category, filename, lineno,
file=None, line=None):
self._list.append(RecordedWarning(
message, category, filename, lineno, file, line))
# still perform old showwarning functionality
self._showwarning(
message, category, filename, lineno, file=file, line=line)
self._module.showwarning = showwarning
# allow the same warning to be raised more than once
self._module.simplefilter('always', append=True)
return self
def __exit__(self, *exc_info):
if not self._entered:
__tracebackhide__ = True
raise RuntimeError("Cannot exit %r without entering first" % self)
self._module.filters = self._filters
self._module.showwarning = self._showwarning
class WarningsChecker(WarningsRecorder):
def __init__(self, expected_warning=None, module=None):
super(WarningsChecker, self).__init__(module=module)
msg = ("exceptions must be old-style classes or "
"derived from Warning, not %s")
if isinstance(expected_warning, tuple):
for exc in expected_warning:
if not inspect.isclass(exc):
raise TypeError(msg % type(exc))
elif inspect.isclass(expected_warning):
expected_warning = (expected_warning,)
elif expected_warning is not None:
raise TypeError(msg % type(expected_warning))
self.expected_warning = expected_warning
def __exit__(self, *exc_info):
super(WarningsChecker, self).__exit__(*exc_info)
# only check if we're not currently handling an exception
if all(a is None for a in exc_info):
if self.expected_warning is not None:
if not any(r.category in self.expected_warning for r in self):
__tracebackhide__ = True
pytest.fail("DID NOT WARN")
|
[
"wgyumg@mgail.com"
] |
wgyumg@mgail.com
|
b891c1b843660a251c2fc198054adab99dfba8ab
|
289bc4207b1c3efe3b99ac637d1ddfb88e28a5be
|
/Section05/debug_example.py
|
cdd21ee0a121d1d8cc04721135fa4e495be7ebd1
|
[
"MIT"
] |
permissive
|
PacktPublishing/-Hands-on-Reinforcement-Learning-with-TensorFlow
|
f5e41ed9e218f721b179e0b1d9aaa3c27957d38a
|
6de9980db2bfc761524c27606e6495c093ddf516
|
refs/heads/master
| 2021-06-20T19:31:45.442884
| 2021-01-15T08:59:53
| 2021-01-15T08:59:53
| 145,985,316
| 11
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,935
|
py
|
import numpy as np
import tensorflow as tf
from tensorflow.python import debug as tf_debug
learning_rate = 0.01
num_epochs = 1000
train_X = np.asarray(
[3.3, 4.4, 5.5, 6.71, 6.93, 4.168, 9.779, 6.182, 7.59, 2.167,
7.042, 10.791, 5.313, 7.997, 5.654, 9.27, 3.1])
train_Y = np.asarray(
[1.7, 2.76, 2.09, 3.19, 1.694, 1.573, 3.366, 2.596, 2.53, 1.221,
2.827, 3.465, 1.65, 2.904, 2.42, 2.94, 1.3])
n_samples = train_X.shape[0]
input_x = tf.placeholder("float")
actual_y = tf.placeholder("float")
# Simple linear regression tries to find W and b such that
# y = Wx + b
W = tf.Variable(np.random.randn(), name="weight")
b = tf.Variable(np.random.randn(), name="bias")
prediction = tf.add(tf.multiply(input_x, W), b)
loss = tf.squared_difference(actual_y, prediction)
# loss = tf.Print(loss, [loss], 'Loss: ', summarize=n_samples)
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
init = tf.global_variables_initializer()
with tf.Session() as sess:
# sess = tf_debug.LocalCLIDebugWrapperSession(sess)
# sess = tf_debug.TensorBoardDebugWrapperSession(
# sess, 'localhost:6064')
sess.run(init)
initial_loss = sess.run(loss, feed_dict={
input_x: train_X,
actual_y: train_Y
})
print("Initial loss", initial_loss)
for epoch in range(num_epochs):
for x, y in zip(train_X, train_Y):
_, c_loss = sess.run([optimizer, loss], feed_dict={
input_x: x,
actual_y: y
})
tf.add_to_collection("Asserts", tf.assert_less(loss, 2.0, [loss]))
tf.add_to_collection("Asserts", tf.assert_positive(loss, [loss]))
assert_op = tf.group(*tf.get_collection('Asserts'))
final_loss, _ = sess.run([loss, assert_op], feed_dict={
input_x: train_X,
actual_y: train_Y
})
print("Final Loss: {}\n W:{}, b:{}".format(
final_loss, sess.run(W), sess.run(b)))
|
[
"noreply@github.com"
] |
PacktPublishing.noreply@github.com
|
cfc1119d27d112c18ef8a4cf62a55b777689fd7e
|
57775b4c245723078fd43abc35320cb16f0d4cb6
|
/Leetcode/hash-table/find-words-that-can-be-formed-by-characters.py
|
5d7dfafa4509bc65009744a92a838b98562774db
|
[] |
no_license
|
farhapartex/code-ninja
|
1757a7292ac4cdcf1386fe31235d315a4895f072
|
168fdc915a4e3d3e4d6f051c798dee6ee64ea290
|
refs/heads/master
| 2020-07-31T16:10:43.329468
| 2020-06-18T07:00:34
| 2020-06-18T07:00:34
| 210,668,245
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 481
|
py
|
class Solution:
def countCharacters(self, words: List[str], chars: str) -> int:
n = 0
for word in words:
y = chars
flag = False
for w in word:
if w in y:
y = y.replace(w,"",1)
else:
flag = True
break
if not flag:
n += len(word)
return n
|
[
"farhapartex@gmail.com"
] |
farhapartex@gmail.com
|
755a2aaad8acce2a42387f5c5739b2381f4c4253
|
8fe6a6790013eed7ca470414c398ea4848e798c4
|
/src/datasets.py
|
c9a59f9a12256e6e0655c82d503fc40b155f989a
|
[
"MIT"
] |
permissive
|
sovit-123/SSD300-VGG11-on-Pascal-VOC-2005-Data
|
d7aef30277076561c46d5f8a3d07985e09b9f13c
|
cb21c4c3e762a0184611b1a1659e7e730ef31932
|
refs/heads/master
| 2022-12-02T11:51:59.256715
| 2020-08-06T16:08:24
| 2020-08-06T16:08:24
| 284,756,098
| 3
| 0
| null | 2020-08-06T05:40:31
| 2020-08-03T16:53:09
|
Python
|
UTF-8
|
Python
| false
| false
| 2,345
|
py
|
import torch
import json
import os
from torch.utils.data import Dataset, DataLoader
from PIL import Image
from utils import transform
class PascalVOCDataset(Dataset):
"""
Custom dataset to load PascalVOC data as batches
"""
def __init__(self, data_folder, split):
"""
:param data_folder: folder path of the data files
:param split: either `TRAIN` or `TEST`
"""
self.split = split.upper()
assert self.split in {'TRAIN', 'TEST'}
self.data_folder = data_folder
# read the data files
with open(os.path.join(data_folder,
self.split + '_images.json'), 'r') as j:
self.images = json.load(j)
with open(os.path.join(data_folder,
self.split + '_objects.json'), 'r') as j:
self.objects = json.load(j)
assert len(self.images) == len(self.objects)
def __len__(self):
return len(self.images)
def __getitem__(self, i):
# read image
image = Image.open(self.images[i])
image = image.convert('RGB')
# get bounding boxes, labels, diffculties for the corresponding image
# all of them are objects
objects = self.objects[i]
boxes = torch.FloatTensor(objects['boxes']) # (n_objects, 4)
labels = torch.LongTensor(objects['labels']) # (n_objects)
# apply transforms
image, boxes, labels = transform(image, boxes, labels, split=self.split)
return image, boxes, labels
def collate_fn(self, batch):
"""
Each batch can have different number of objects.
We will pass this collate function to the DataLoader.
You can define this function outside the class as well.
:param batch: iterable items from __getitem(), size equal to batch size
:return: a tensor of images, lists of varying-size tensors of
bounding boxes, labels, and difficulties
"""
images = list()
boxes = list()
labels = list()
for b in batch:
images.append(b[0])
boxes.append(b[1])
labels.append(b[2])
images = torch.stack(images, dim=0)
# return a tensor (N, 3, 300, 300), 3 lists of N tesnors each
return images, boxes, labels
|
[
"sovitrath5@gmail.com"
] |
sovitrath5@gmail.com
|
3c54d18e1ddff0980eaddc81064f2886f30343da
|
80052e0cbfe0214e4878d28eb52009ff3054fe58
|
/e2yun_addons/extra-addons/merp_picking_wave/wizard/message_wizard.py
|
0e5a598da22cd2017115952c6a2839e8fa5675d9
|
[] |
no_license
|
xAlphaOmega/filelib
|
b022c86f9035106c24ba806e6ece5ea6e14f0e3a
|
af4d4b079041f279a74e786c1540ea8df2d6b2ac
|
refs/heads/master
| 2021-01-26T06:40:06.218774
| 2020-02-26T14:25:11
| 2020-02-26T14:25:11
| 243,349,887
| 0
| 2
| null | 2020-02-26T19:39:32
| 2020-02-26T19:39:31
| null |
UTF-8
|
Python
| false
| false
| 977
|
py
|
# Copyright 2019 VentorTech OU
# Part of Ventor modules. See LICENSE file for full copyright and licensing details.
from odoo import models, fields as oe_fields, api, _
class MessageWizard(models.TransientModel):
_name = 'message.wizard'
message = oe_fields.Text()
@api.model
def default_get(self, fields):
res = super(MessageWizard, self).default_get(fields)
res['message'] = self.env.context.get('message')
return res
@api.multi
def wizard_view(self):
view = self.env.ref('merp_picking_wave.view_message_wizard')
return {
'name': _('Message'),
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'message.wizard',
'views': [(view.id, 'form')],
'view_id': view.id,
'target': 'new',
# 'res_id': self.ids[0],
'context': self.env.context,
}
|
[
"joytao.zhu@icloud.com"
] |
joytao.zhu@icloud.com
|
8f53dfcc7f2f3305bc737e1491065fa5815c5aa6
|
660328cb139ce1f90da70dbe640df62bf79bcc61
|
/infra/src/stages/train_stage_base.py
|
05241ecd3f6f9cd28a02c5b6c310f1a38a27c433
|
[
"MIT-0"
] |
permissive
|
cyrilhamidechi/amazon-frauddetector-mlops-multiaccount-cdk
|
0801f4b844bd9b8e80776748c1056db83c9023fb
|
379def0a571452b7920a9aaa56bccc2bfb39c523
|
refs/heads/main
| 2023-04-23T13:49:37.413348
| 2021-05-10T18:37:41
| 2021-05-10T18:37:41
| 366,139,181
| 0
| 0
|
NOASSERTION
| 2021-05-10T18:25:59
| 2021-05-10T18:25:58
| null |
UTF-8
|
Python
| false
| false
| 2,432
|
py
|
# ***************************************************************************************
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. *
# *
# Permission is hereby granted, free of charge, to any person obtaining a copy of this *
# software and associated documentation files (the "Software"), to deal in the Software *
# without restriction, including without limitation the rights to use, copy, modify, *
# merge, publish, distribute, sublicense, and/or sell copies of the Software, and to *
# permit persons to whom the Software is furnished to do so. *
# *
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, *
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A *
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT *
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION *
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE *
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. *
# ***************************************************************************************
from typing import List, Dict
from aws_cdk import core
from aws_cdk.aws_codebuild import BuildEnvironmentVariable
from aws_cdk.aws_codepipeline import Artifact
from aws_cdk.aws_codepipeline_actions import Action
StageActionList = List[Action]
OutputArtifacts = List[Artifact]
OutputVariables = Dict[str, BuildEnvironmentVariable]
VariableNamespace = str
class TrainStageBase:
@property
def name(self):
return NotImplementedError
@property
def output_variables(self):
return NotImplementedError
def get_stage_actions(self, scope: core.Construct, env: str, stage_name: str,
source_artifacts: List[Artifact]) -> (StageActionList, VariableNamespace):
"""
Creates stage actions and returns the actions, the output artifacts and output variables
:param env:
:param scope:
:param stage_name:
:param source_artifacts:
:return:
"""
raise NotImplementedError
|
[
"aeg@amazon.com"
] |
aeg@amazon.com
|
d65784b7ec0cdad3d8e2ac0b3c31ebe3e21c263e
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_embassy.py
|
8c0f88ca692520c38a075b1fcc908e831280b60a
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 397
|
py
|
#calss header
class _EMBASSY():
def __init__(self,):
self.name = "EMBASSY"
self.definitions = [u'the group of people who represent their country in a foreign country: ', u'the building that these people work in: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
57b68e4a74604876334affc613d1d972667cfbe0
|
c6431cdf572dd10f0f4d45839e6081124b246f90
|
/code/lc3.py
|
2ec290d3bc538c94abd8a7e80c92e02c5ff01e14
|
[] |
no_license
|
bendanwwww/myleetcode
|
1ec0285ea19a213bc629e0e12fb8748146e26d3d
|
427846d2ad1578135ef92fd6549235f104f68998
|
refs/heads/master
| 2021-09-27T19:36:40.111456
| 2021-09-24T03:11:32
| 2021-09-24T03:11:32
| 232,493,899
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,052
|
py
|
"""
给定一个字符串,请你找出其中不含有重复字符的 最长子串 的长度。
示例 1:
输入: "abcabcbb"
输出: 3
解释: 因为无重复字符的最长子串是 "abc",所以其长度为 3。
示例 2:
输入: "bbbbb"
输出: 1
解释: 因为无重复字符的最长子串是 "b",所以其长度为 1。
示例 3:
输入: "pwwkew"
输出: 3
解释: 因为无重复字符的最长子串是 "wke",所以其长度为 3。
请注意,你的答案必须是 子串 的长度,"pwke" 是一个子序列,不是子串。
"""
class Solution(object):
def lengthOfLongestSubstring(self, s):
dictMap = {}
res = 0
first = 0
for i in range(len(s)):
if s[i] not in dictMap:
dictMap[s[i]] = i
else:
index = dictMap[s[i]]
first = max(first, index + 1)
dictMap[s[i]] = i
res = max(res, i - first + 1)
return res
s = Solution()
res = s.lengthOfLongestSubstring('abba')
print(res)
|
[
"461806307@qq.com"
] |
461806307@qq.com
|
903a77f4a02718a688e108e05b286348b1c99a65
|
eef243e450cea7e91bac2f71f0bfd45a00c6f12c
|
/.history/worker_master_20210128031009.py
|
16168c2cf8a83eb1dfb82220fd383d845e05ab9a
|
[] |
no_license
|
hoaf13/nlp-chatbot-lol
|
910ab2ea3b62d5219901050271fc1a1340e46a2f
|
18cb64efa9d6b4cafe1015f1cd94f4409271ef56
|
refs/heads/master
| 2023-05-08T04:17:19.450718
| 2021-02-02T02:37:38
| 2021-02-02T02:37:38
| 332,535,094
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 493
|
py
|
import redis
red = redis.StrictRedis(host='localhost',port=6379,db=0)
queue = list()
def str_to_bool(str):
if str == b'False':
return False
if str == b'True':
return True
return None
while True:
# check supplier product status
is_new = str_to_bool(red.get("new_product_worker1"))
if is_new:
taken_product = red.get('product_worker1')
queue.append(taken_product)
red.set("new_product_worker1", str(False))
|
[
"samartcall@gmail.com"
] |
samartcall@gmail.com
|
509601af0ae5337e7f8b9fc2f49be25dda28dc54
|
4acc08d2c165b5d88119df6bb4081bcfaca684f7
|
/PythonPrograms/python_program/multiple_matrix.py
|
c88486026ddb4fe56be352d8cd4c3a355b0923f6
|
[] |
no_license
|
xiaotuzixuedaima/PythonProgramDucat
|
9059648f070db7304f9aaa45657c8d3df75f3cc2
|
90c6947e6dfa8ebb6c8758735960379a81d88ae3
|
refs/heads/master
| 2022-01-16T04:13:17.849130
| 2019-02-22T15:43:18
| 2019-02-22T15:43:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 516
|
py
|
# Python Program to Multiply Two Matrices ????
l = [[1,2,3],
[2,3,4],
[3,4,5]]
m = [[3,4,5],
[5,6,7],
[6,7,8]]
for i in range(3):
for j in range(len(l)):
sum = 0
for k in range(len(m)):
sum=sum + l[i][k] * m[k][j]
print(sum,end=" ")
print()
'''
output ==
l = [[1,2,3],
[2,3,4],
[3,4,5]]
m = [[3,4,5],
[5,6,7],
[6,7,8]]
output ==== ***********
l*m = 31 37 43 *
45 54 63 *
59 71 83 *
==== ***********
'''
|
[
"ss7838094755@gmail.com"
] |
ss7838094755@gmail.com
|
83503bae694f4bdf6c82b15e366f28e0066e3537
|
93e9bbcdd981a6ec08644e76ee914e42709579af
|
/depth-first-search/323_Number_of_Connected_Components_in_an_Undirected_Graph.py
|
4009e47f32f8f461cc7dbd9fe5c9d094309c835b
|
[] |
no_license
|
vsdrun/lc_public
|
57aa418a8349629494782f1a009c1a8751ffe81d
|
6350568d16b0f8c49a020f055bb6d72e2705ea56
|
refs/heads/master
| 2020-05-31T11:23:28.448602
| 2019-10-02T21:00:57
| 2019-10-02T21:00:57
| 190,259,739
| 6
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,112
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
https://leetcode.com/problems/number-of-connected-components-in-an-undirected-graph/description/
Given n nodes labeled from 0 to n - 1 and a list of undirected edges
(each edge is a pair of nodes)
write a function to find the number of
connected components in an undirected graph.
Example 1:
0 3
| |
1 --- 2 4
Given n = 5 and edges = [[0, 1], [1, 2], [3, 4]], return 2.
Example 2:
0 4
| |
1 --- 2 --- 3
Given n = 5 and edges = [[0, 1], [1, 2], [2, 3], [3, 4]], return 1.
Note:
You can assume that no duplicate edges will appear in edges.
Since all edges are undirected,
[0, 1] is the same as [1, 0] and thus will not appear together in edges.
"""
class Solution(object):
def countComponents(self, n, edges):
"""
:type n: int
:type edges: List[List[int]]
:rtype: int
"""
from __builtin__ import xrange
graph = {i: [] for i in xrange(n)}
# build graph
for e in edges:
graph[e[0]] += e[1],
graph[e[1]] += e[0],
def dfs(key):
child = graph.pop(key, [])
for c in child:
dfs(c)
cnt = 0
while graph:
key = graph.keys()[0]
dfs(key)
cnt += 1
return cnt
def rewrite(self, n, edges):
"""
:type n: int
:type edges: List[List[int]]
:rtype: int
"""
# build bi-dir graph
dmap = {i: [] for i in range(n)}
for e in edges:
dmap[e[0]].append(e[1])
dmap[e[1]].append(e[0])
def dfs(node):
child = dmap.pop(node, [])
for c in child:
dfs(c)
cnt = 0
while dmap:
cnt += 1
k = dmap.keys()[0]
dfs(k)
return cnt
def build():
return 5, [[0, 1], [1, 2], [3, 4]]
if __name__ == "__main__":
s = Solution()
print(s.countComponents(*build()))
print(s.rewrite(*build()))
|
[
"vsdmars@gmail.com"
] |
vsdmars@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.