Dataset schema (one row per column; dtype and observed range as reported by the viewer, ⌀ = column contains nulls):

| column | dtype | range / values |
|---|---|---|
| hexsha | string | lengths 40–40 |
| size | int64 | 5–2.06M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 3–248 |
| max_stars_repo_name | string | lengths 5–125 |
| max_stars_repo_head_hexsha | string | lengths 40–78 |
| max_stars_repo_licenses | list | lengths 1–10 |
| max_stars_count | int64 | 1–191k ⌀ |
| max_stars_repo_stars_event_min_datetime | string | lengths 24–24 ⌀ |
| max_stars_repo_stars_event_max_datetime | string | lengths 24–24 ⌀ |
| max_issues_repo_path | string | lengths 3–248 |
| max_issues_repo_name | string | lengths 5–125 |
| max_issues_repo_head_hexsha | string | lengths 40–78 |
| max_issues_repo_licenses | list | lengths 1–10 |
| max_issues_count | int64 | 1–67k ⌀ |
| max_issues_repo_issues_event_min_datetime | string | lengths 24–24 ⌀ |
| max_issues_repo_issues_event_max_datetime | string | lengths 24–24 ⌀ |
| max_forks_repo_path | string | lengths 3–248 |
| max_forks_repo_name | string | lengths 5–125 |
| max_forks_repo_head_hexsha | string | lengths 40–78 |
| max_forks_repo_licenses | list | lengths 1–10 |
| max_forks_count | int64 | 1–105k ⌀ |
| max_forks_repo_forks_event_min_datetime | string | lengths 24–24 ⌀ |
| max_forks_repo_forks_event_max_datetime | string | lengths 24–24 ⌀ |
| content | string | lengths 5–2.06M |
| avg_line_length | float64 | 1–1.02M |
| max_line_length | int64 | 3–1.03M |
| alphanum_fraction | float64 | 0–1 |
| count_classes | int64 | 0–1.6M |
| score_classes | float64 | 0–1 |
| count_generators | int64 | 0–651k |
| score_generators | float64 | 0–1 |
| count_decorators | int64 | 0–990k |
| score_decorators | float64 | 0–1 |
| count_async_functions | int64 | 0–235k |
| score_async_functions | float64 | 0–1 |
| count_documentation | int64 | 0–1.04M |
| score_documentation | float64 | 0–1 |

The sample rows that follow keep the flattened viewer layout: one cell per line, in the column order above, with each file's contents reproduced verbatim in the `content` cell.
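Not part of the original dump: a minimal sketch of how rows with the schema above could be inspected. The dataset identifier `your-org/python-corpus` is a placeholder (the dump does not name the dataset), and the semantics of the `score_*` columns are assumed rather than documented here.

```python
# Hedged sketch: the dataset name is a placeholder; the column names come from
# the schema table above. Requires the `datasets` library.
from datasets import load_dataset

ds = load_dataset("your-org/python-corpus", split="train", streaming=True)

# Peek at a handful of rows and keep small files with a high documentation
# score (assuming score_documentation approximates the documented fraction).
for row in ds.take(100):
    if row["score_documentation"] > 0.2 and row["size"] < 50_000:
        print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
```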
dd1aa8274a7a29ed0c0e03c51dfff1daaec8435e
| 26
|
py
|
Python
|
data/studio21_generated/introductory/4783/starter_code.py
|
vijaykumawat256/Prompt-Summarization
|
614f5911e2acd2933440d909de2b4f86653dc214
|
[
"Apache-2.0"
] | null | null | null |
data/studio21_generated/introductory/4783/starter_code.py
|
vijaykumawat256/Prompt-Summarization
|
614f5911e2acd2933440d909de2b4f86653dc214
|
[
"Apache-2.0"
] | null | null | null |
data/studio21_generated/introductory/4783/starter_code.py
|
vijaykumawat256/Prompt-Summarization
|
614f5911e2acd2933440d909de2b4f86653dc214
|
[
"Apache-2.0"
] | null | null | null |
def weather_info (temp):
| 13
| 24
| 0.730769
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
dd1d14cad8d7b51f2d3997d8681795f2ed9d4e1a
| 2,863
|
py
|
Python
|
server/blog/models.py
|
rafay826/django-react
|
60bd7dcea8bb5c921f80d064e4d16fa34381ae09
|
[
"MIT"
] | null | null | null |
server/blog/models.py
|
rafay826/django-react
|
60bd7dcea8bb5c921f80d064e4d16fa34381ae09
|
[
"MIT"
] | 12
|
2020-06-05T21:17:35.000Z
|
2022-03-11T23:49:11.000Z
|
server/blog/models.py
|
midasdev711/djudo
|
5717ad63b0ef5dddddfd1d3839fa5231ac21972f
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.db.models import permalink
from django.core.urlresolvers import reverse
from comments.models import Comment
# Create your models here.
class PostManager(models.Manager):
def active(self, *args, **kwargs):
return super(PostManager, self)
def upload_location(instance, filename):
#filebase, extension = filename.split(".")
#return "%s/%s.%s" %(instance.id, instance.id, extension)
PostModel = instance.__class__
"""
instance.__class__ gets the model Post. We must use this method because the model is defined below.
Then create a queryset ordered by the "id"s of each object,
Then we get the last object in the queryset with `.last()`
Which will give us the most recently created Model instance
We add 1 to it, so we get what should be the same id as the the post we are creating.
"""
return "static/images/posts/%s" % filename
class Post(models.Model):
id = models.AutoField(primary_key=True)
title = models.CharField(max_length=100, unique=True)
slug = models.SlugField(max_length=100, unique=True)
image = models.ImageField(upload_to=upload_location,
null=True,
blank=True,
width_field="width_field",
height_field="height_field")
image_url = models.CharField(max_length=1000, null=True, blank=True)
height_field = models.IntegerField(default=0)
width_field = models.IntegerField(default=0)
description = models.CharField(max_length=255, blank=True)
body = models.TextField()
published = models.BooleanField(default=True)
created = models.DateTimeField(db_index=True, auto_now_add=True)
category = models.ForeignKey('blog.Category', related_name='posts', on_delete=models.CASCADE)
objects = PostManager()
def __unicode__(self):
return '%s' % self.title
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse("blog.views.post", kwargs={"slug": self.slug})
def get_api_url(self):
return reverse("posts-api:detail", kwargs={"slug": self.slug})
@property
def comments(self):
instance = self
qs = Comment.objects.filter_by_instance(instance)
return qs
class CategoryManager(models.Manager):
def active(self, *args, **kwargs):
return super(CategoryManager, self)
class Category(models.Model):
id = models.AutoField(primary_key=True)
title = models.CharField(max_length=100, db_index=True)
slug = models.SlugField(max_length=100, db_index=True)
class Meta:
verbose_name = ("Category")
verbose_name_plural = ("Categories")
objects = CategoryManager()
def __unicode__(self):
return '%s' % self.title
def get_absolute_url(self):
return reverse('blog.views.post', args=[self.slug])
| 35.7875
| 103
| 0.691233
| 2,044
| 0.713936
| 0
| 0
| 133
| 0.046455
| 0
| 0
| 689
| 0.240657
|
dd1d3ef072d0bbe5516060cc303f3f2982632867
| 305
|
py
|
Python
|
django_obfuscator/testobfuscator/models.py
|
vishnuc91/obfuscator-date
|
d4424cb7823dbf20543c5cc2bc0ce48d8d62a69a
|
[
"Apache-2.0"
] | null | null | null |
django_obfuscator/testobfuscator/models.py
|
vishnuc91/obfuscator-date
|
d4424cb7823dbf20543c5cc2bc0ce48d8d62a69a
|
[
"Apache-2.0"
] | null | null | null |
django_obfuscator/testobfuscator/models.py
|
vishnuc91/obfuscator-date
|
d4424cb7823dbf20543c5cc2bc0ce48d8d62a69a
|
[
"Apache-2.0"
] | null | null | null |
from django.db import models
# Create your models here.
class MyModel(models.Model):
aname = models.CharField(max_length=100, null=True, blank=True)
anint = models.IntegerField(default=999)
astring = models.CharField(max_length=50)
date = models.DateField('Date', null=True, blank=True)
| 30.5
| 67
| 0.734426
| 246
| 0.806557
| 0
| 0
| 0
| 0
| 0
| 0
| 32
| 0.104918
|
dd1d5f6520079cb76d00ac62e5817128a147acb0
| 69
|
py
|
Python
|
pyintercept/handlers/print_handler.py
|
caioariede/pyintercept
|
19039ce3038521bf32aaafe207024adeb0096749
|
[
"MIT"
] | 32
|
2015-07-20T21:13:26.000Z
|
2018-04-05T13:53:28.000Z
|
pyintercept/handlers/print_handler.py
|
caioariede/pyintercept
|
19039ce3038521bf32aaafe207024adeb0096749
|
[
"MIT"
] | 2
|
2019-07-23T17:38:06.000Z
|
2020-02-27T13:38:02.000Z
|
pyintercept/handlers/print_handler.py
|
caioariede/pyintercept
|
19039ce3038521bf32aaafe207024adeb0096749
|
[
"MIT"
] | 3
|
2015-08-09T14:48:38.000Z
|
2020-02-27T12:58:46.000Z
|
def print_(origfn, *args, **kwargs):
print args
print kwargs
| 17.25
| 36
| 0.652174
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
dd1ed841552b8b3a90cb7777b80332b35c886661
| 7,621
|
py
|
Python
|
PySyft_dev/FL_BC/cryptolib/wrapper_pyca.py
|
samuelxu999/FederatedLearning_dev
|
354d951c53ee20eb41bf7980210d61b7a358d341
|
[
"MIT"
] | null | null | null |
PySyft_dev/FL_BC/cryptolib/wrapper_pyca.py
|
samuelxu999/FederatedLearning_dev
|
354d951c53ee20eb41bf7980210d61b7a358d341
|
[
"MIT"
] | 2
|
2021-03-17T23:27:00.000Z
|
2021-03-17T23:27:01.000Z
|
PySyft_dev/FL_BC/cryptolib/wrapper_pyca.py
|
samuelxu999/FederatedLearning_dev
|
354d951c53ee20eb41bf7980210d61b7a358d341
|
[
"MIT"
] | 2
|
2019-04-23T22:13:18.000Z
|
2019-08-19T01:39:51.000Z
|
'''
========================
Wrapper_pyca module
========================
Created on Nov.7, 2017
@author: Xu Ronghua
@Email: rxu22@binghamton.edu
@TaskDescription: This module provide cryptography function based on pyca API.
@Reference:https://cryptography.io/en/latest/
'''
from cryptography.fernet import Fernet
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import ec, dsa
from cryptography.hazmat.primitives.serialization import Encoding, PublicFormat, PrivateFormat, BestAvailableEncryption
from cryptography.hazmat.primitives import serialization
from cryptography.exceptions import InvalidSignature
class Crypto_DSA(object):
'''
Generate key pairs as json fromat
@in: key_size
@out: key_pairs={'private_key':x,'public_key':{'y':y,'p':p,'q':q,'g':g}}
'''
@staticmethod
def generate_key_pairs(key_size=1024):
#define key_pairs dictionary
key_pairs={}
#generate private key
private_key = dsa.generate_private_key(key_size=key_size, backend=default_backend())
private_number=private_key.private_numbers()
#add private key value - x
key_pairs['private_key']=private_number.x
#get private key from private_key
public_key = private_key.public_key()
#get public number
public_numbers=public_key.public_numbers()
y=public_numbers.y
p=public_numbers.parameter_numbers.p
q=public_numbers.parameter_numbers.q
g=public_numbers.parameter_numbers.g
#add public_key_numbers value - y, p, q, g
public_keys_numbers={'y':y, 'p':p, 'q':q, 'g':g}
key_pairs['public_key']=public_keys_numbers
return key_pairs
'''
Display out key pairs data on screen
@in: key_pairs={'private_key':x,'public_key':{'y':y,'p':p,'q':q,'g':g}}
@out: print out key pairs data on screen
'''
@staticmethod
def display_key_pairs(key_pairs):
print("private key value x:%d" %(key_pairs['private_key']))
public_keys_numbers=key_pairs['public_key']
print("public key value y:%d" %(public_keys_numbers['y']))
print("public key value p:%d" %(public_keys_numbers['p']))
print("public key value q:%d" %(public_keys_numbers['q']))
print("public key value g:%d" %(public_keys_numbers['g']))
'''
Get public key object given public key numbers
@in: public_key_numbers={'public_key':{'y':y,'p':p,'q':q,'g':g}}
@out: public_key object
'''
@staticmethod
def get_public_key(public_key_numbers):
y=public_key_numbers['y']
p=public_key_numbers['p']
q=public_key_numbers['q']
g=public_key_numbers['g']
#construct public key based on public_key_numbers
parameter_numbers=dsa.DSAParameterNumbers(p,q,g)
publick_number=dsa.DSAPublicNumbers(y,parameter_numbers)
public_key=publick_number.public_key(default_backend())
#print(publick_number)
return public_key
'''
Get private key object given private key numbers
@in: private_key_numbers={'publicprivate_key':x}
@in: public_key_numbers={'public_key':{'y':y,'p':p,'q':q,'g':g}}
@out: private_key object
'''
@staticmethod
def get_private_key(x, public_key_numbers):
#reconstruct private key
private_numbers=dsa.DSAPrivateNumbers(x, public_key_numbers)
#construct private_key based on private_numbers
private_key=private_numbers.private_key(default_backend())
return private_key
'''
Generate signature by signing data
@in: private_key object
@in: sign_data
@out: signature
'''
@staticmethod
def sign(private_key, sign_data):
signature=private_key.sign(sign_data,hashes.SHA256())
return signature
'''
Verify signature by using public_key
@in: public_key object
@in: signature
@in: sign_data
@out: True or False
'''
@staticmethod
def verify(public_key, signature, sign_data):
try:
public_key.verify(signature, sign_data, hashes.SHA256())
except InvalidSignature:
return False
except:
return False
return True
'''
Generate public key bytes
@in: public_key object
@in: encoding- Encoding.PEM or Encoding.DER
@out: public_key_bytes
'''
@staticmethod
def get_public_key_bytes(public_key, encoding=Encoding.PEM):
public_key_bytes=public_key.public_bytes(encoding, PublicFormat.SubjectPublicKeyInfo)
return public_key_bytes
'''
Generate public_key object by loading public key bytes
@in: public_key_bytes
@in: encoding- Encoding.PEM or Encoding.DER
@out: public_key object
'''
@staticmethod
def load_public_key_bytes(public_key_bytes,encoding=Encoding.PEM):
if(encoding==Encoding.PEM):
public_key=serialization.load_pem_public_key(public_key_bytes, default_backend())
elif(encoding==Encoding.DER):
public_key=serialization.load_der_public_key(public_key_bytes, default_backend())
else:
public_key=''
return public_key
'''
Generate private key bytes
@in: private_key object
@in: encryp_pw- password for encryption private_key_bytes
@in: encoding- Encoding.PEM or Encoding.DER
@in: private_format- PrivateFormat.PKCS8 or PrivateFormat.TraditionalOpenSSL
@out: private_key_bytes
'''
@staticmethod
def get_private_key_bytes(private_key, encryp_pw=b'rootpasswd', encoding=Encoding.PEM, private_format=PrivateFormat.PKCS8):
private_key_bytes=private_key.private_bytes(encoding, private_format, BestAvailableEncryption(bytes(encryp_pw)))
return private_key_bytes
'''
Generate private_key object by loading public key bytes
@in: private_key_bytes
@in: encryp_pw- password for encryption private_key_bytes
@in: encoding- Encoding.PEM or Encoding.DER
@out: private_key object
'''
@staticmethod
def load_private_key_bytes(private_key_bytes, encryp_pw=b'rootpasswd', encoding=Encoding.PEM):
if(encoding==Encoding.PEM):
private_key=serialization.load_pem_private_key(private_key_bytes, encryp_pw, default_backend())
elif(encoding==Encoding.DER):
private_key=serialization.load_der_private_key(private_key_bytes, encryp_pw, default_backend())
else:
private_key=''
return private_key
'''
Save key bytes data in key_file
@in: key_bytes
@in: key_file
'''
@staticmethod
def save_key_bytes(key_bytes, key_file):
fname = open(key_file, 'w')
fname.write("%s" %(key_bytes.decode(encoding='UTF-8')))
fname.close()
'''
Load key bytes data from key_file
@in: key_file
@out: key_bytes
'''
@staticmethod
def load_key_bytes(key_file):
fname = open(key_file, 'r')
key_bytes=fname.read().encode(encoding='UTF-8')
fname.close()
return key_bytes
# Message digests (Hashing) related function
class Crypto_Hash(object):
'''
Generate hash value given input data
@in: byte_data
@out: hashed_value
'''
@staticmethod
def generate_hash(byte_data):
#new digest hash instance
digest = hashes.Hash(hashes.SHA256(), backend=default_backend())
# apply hash function to data block
digest.update(byte_data)
# Finalize the current context and return the message digest as bytes.
hash_block=digest.finalize()
return hash_block
'''
verify hash value of given input data
@in: hash_data
@in: byte_data
@out: hashed_value
'''
@staticmethod
def verify_hash(hash_data, byte_data):
#new digest hash instance
digest = hashes.Hash(hashes.SHA256(), backend=default_backend())
# apply hash function to data block
digest.update(byte_data)
# Finalize the current context and return the message digest as bytes.
hash_block=digest.finalize()
return hash_data==hash_block
'''
Get all dataset
'''
def test_func():
hash_value=Crypto_Hash.generate_hash(b'samuel')
print(Crypto_Hash.verify_hash(hash_value, b'samuel'))
pass
if __name__ == "__main__":
test_func()
pass
| 28.543071
| 124
| 0.746228
| 6,650
| 0.872589
| 0
| 0
| 4,506
| 0.591261
| 0
| 0
| 3,205
| 0.420548
|
dd1f85e853fc4ae8cfcfa14f28add26fec35c361
| 693
|
py
|
Python
|
src/utils/formatter.py
|
RuhuiCheng/ladybug
|
fa9e1ea660dd040d3ecfde96ad6c4db67df9bcb9
|
[
"Apache-2.0"
] | 4
|
2020-03-14T10:43:29.000Z
|
2020-09-23T11:15:44.000Z
|
src/utils/formatter.py
|
RuhuiCheng/ladybug
|
fa9e1ea660dd040d3ecfde96ad6c4db67df9bcb9
|
[
"Apache-2.0"
] | null | null | null |
src/utils/formatter.py
|
RuhuiCheng/ladybug
|
fa9e1ea660dd040d3ecfde96ad6c4db67df9bcb9
|
[
"Apache-2.0"
] | null | null | null |
import logging
import json
from src.utils.ucm import app_id, env
class JsonLogFormatter(logging.Formatter):
def format(self, record):
msg = ''
if record.exc_text is None:
msg = record.message
else:
msg = record.exc_text
data = {
'app_id': ''+app_id+'',
'asctime': ''+record.asctime+'',
'env': ''+env+'',
'file_name': ''+record.filename+'',
'func_name': ''+record.funcName+'',
'level': ''+record.levelname+'',
'line_number': record.lineno,
'message': ''+msg+''
}
string_msg = json.dumps(data)
return string_msg
| 26.653846
| 47
| 0.506494
| 625
| 0.901876
| 0
| 0
| 0
| 0
| 0
| 0
| 103
| 0.148629
|
dd21e59c37175256cff1379b7b03b4b83f129381
| 363
|
py
|
Python
|
afterglow_core/resources/job_plugins/__init__.py
|
SkynetRTN/afterglow-access-server
|
3d8d62f622577fdd1ae7b0076cb536251f7bf0cd
|
[
"Apache-2.0"
] | 2
|
2021-05-24T15:12:07.000Z
|
2022-02-17T19:58:16.000Z
|
afterglow_core/resources/job_plugins/__init__.py
|
SkynetRTN/afterglow-access-server
|
3d8d62f622577fdd1ae7b0076cb536251f7bf0cd
|
[
"Apache-2.0"
] | 1
|
2022-02-27T03:01:06.000Z
|
2022-02-27T03:01:06.000Z
|
afterglow_core/resources/job_plugins/__init__.py
|
SkynetRTN/afterglow-access-server
|
3d8d62f622577fdd1ae7b0076cb536251f7bf0cd
|
[
"Apache-2.0"
] | 2
|
2021-06-08T18:16:40.000Z
|
2021-07-09T14:19:49.000Z
|
"""
Afterglow Core: job plugin package
A job plugin must define a custom model subclassing from
:class:`afterglow_core.models.jobs.Job`, along with the optional custom result
and settings models (subclassing from
:class:`afterglow_core.models.jobs.JobResult` and
:class:`afterglow_core.schemas.AfterglowSchema`, respectively), and implement
:meth:`Job.run`.
"""
| 33
| 78
| 0.793388
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 362
| 0.997245
|
dd2300aac8a3080e89edc939e28aa0516c80f6a3
| 4,909
|
py
|
Python
|
wotpy/wot/dictionaries/thing.py
|
JKRhb/wot-py
|
3eaa780189b686c82b7dbdea404fd8077bd3c9f9
|
[
"MIT"
] | null | null | null |
wotpy/wot/dictionaries/thing.py
|
JKRhb/wot-py
|
3eaa780189b686c82b7dbdea404fd8077bd3c9f9
|
[
"MIT"
] | null | null | null |
wotpy/wot/dictionaries/thing.py
|
JKRhb/wot-py
|
3eaa780189b686c82b7dbdea404fd8077bd3c9f9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Wrapper class for dictionaries to represent Things.
"""
import six
from wotpy.wot.dictionaries.base import WotBaseDict
from wotpy.wot.dictionaries.interaction import PropertyFragmentDict, ActionFragmentDict, EventFragmentDict
from wotpy.wot.dictionaries.link import LinkDict
from wotpy.wot.dictionaries.security import SecuritySchemeDict
from wotpy.utils.utils import to_camel
from wotpy.wot.dictionaries.version import VersioningDict
from wotpy.wot.enums import SecuritySchemeType
class ThingFragment(WotBaseDict):
"""ThingFragment is a wrapper around a dictionary that contains properties
representing semantic metadata and interactions (Properties, Actions and Events).
It is used for initializing an internal representation of a Thing Description,
and it is also used in ThingFilter."""
class Meta:
fields = {
"id",
"version",
"name",
"description",
"support",
"created",
"lastModified",
"base",
"properties",
"actions",
"events",
"links",
"security"
}
required = {
"id"
}
fields_readonly = [
"id"
]
fields_str = [
"name",
"description",
"support",
"created",
"lastModified",
"base"
]
fields_dict = [
"properties",
"actions",
"events"
]
fields_list = [
"links",
"security"
]
fields_instance = [
"version"
]
assert set(fields_readonly + fields_str + fields_dict + fields_list + fields_instance) == fields
def __setattr__(self, name, value):
"""Checks to see if the attribute that is being set is a
Thing fragment property and updates the internal dict."""
name_camel = to_camel(name)
if name_camel not in self.Meta.fields:
return super(ThingFragment, self).__setattr__(name, value)
if name_camel in self.Meta.fields_readonly:
raise AttributeError("Can't set attribute {}".format(name))
if name_camel in self.Meta.fields_str:
self._init[name_camel] = value
return
if name_camel in self.Meta.fields_dict:
self._init[name_camel] = {key: val.to_dict() for key, val in six.iteritems(value)}
return
if name_camel in self.Meta.fields_list:
self._init[name_camel] = [item.to_dict() for item in value]
return
if name_camel in self.Meta.fields_instance:
self._init[name_camel] = value.to_dict()
return
@property
def name(self):
"""The name of the Thing.
This property returns the ID if the name is undefined."""
return self._init.get("name", self.id)
@property
def security(self):
"""Set of security configurations, provided as an array,
that must all be satisfied for access to resources at or
below the current level, if not overridden at a lower level.
A default nosec security scheme will be provided if none are defined."""
if "security" not in self._init:
return [SecuritySchemeDict.build({"scheme": SecuritySchemeType.NOSEC})]
return [SecuritySchemeDict.build(item) for item in self._init.get("security")]
@property
def properties(self):
"""The properties optional attribute represents a dict with keys
that correspond to Property names and values of type PropertyFragment."""
return {
key: PropertyFragmentDict(val)
for key, val in six.iteritems(self._init.get("properties", {}))
}
@property
def actions(self):
"""The actions optional attribute represents a dict with keys
that correspond to Action names and values of type ActionFragment."""
return {
key: ActionFragmentDict(val)
for key, val in six.iteritems(self._init.get("actions", {}))
}
@property
def events(self):
"""The events optional attribute represents a dictionary with keys
that correspond to Event names and values of type EventFragment."""
return {
key: EventFragmentDict(val)
for key, val in six.iteritems(self._init.get("events", {}))
}
@property
def links(self):
"""The links optional attribute represents an array of Link objects."""
return [LinkDict(item) for item in self._init.get("links", [])]
@property
def version(self):
"""Provides version information."""
return VersioningDict(self._init.get("version")) if self._init.get("version") else None
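Illustrative aside, not part of the sampled file above: the ThingFragment docstring says it wraps a plain Thing Description dictionary. Assuming WotBaseDict subclasses are constructed directly from that dictionary (an inference from the `self._init` accessors above, not a documented guarantee), usage might look like:

```python
# Hypothetical usage sketch; the constructor signature is inferred, not confirmed.
fragment = ThingFragment({
    "id": "urn:dev:ops:example-thing",  # "id" is the only required field per Meta
    "name": "Example Thing",
    "properties": {
        "temperature": {"type": "number", "readOnly": True},
    },
})

print(fragment.name)                     # -> "Example Thing"
print(list(fragment.properties.keys()))  # -> ["temperature"]
```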
| 29.932927
| 106
| 0.606233
| 4,371
| 0.890405
| 0
| 0
| 2,041
| 0.415767
| 0
| 0
| 1,754
| 0.357303
|
dd25aadd6a2a27a37d069c617bb272fc239e2c39
| 1,044
|
py
|
Python
|
MyApp/config.py
|
muayyad-alsadi/uPyApp
|
e81fb9d336a843d068ae93c866cf503989ef6a60
|
[
"Apache-2.0"
] | null | null | null |
MyApp/config.py
|
muayyad-alsadi/uPyApp
|
e81fb9d336a843d068ae93c866cf503989ef6a60
|
[
"Apache-2.0"
] | null | null | null |
MyApp/config.py
|
muayyad-alsadi/uPyApp
|
e81fb9d336a843d068ae93c866cf503989ef6a60
|
[
"Apache-2.0"
] | 1
|
2020-03-18T07:27:27.000Z
|
2020-03-18T07:27:27.000Z
|
__all__ = ['app_config']
import sys, os, os.path
try: import ConfigParser as configparser
except ImportError: import configparser
try: import simplejson as json
except ImportError: import json
from .base_utils import *
class AppConfig(configparser.RawConfigParser):
config_filename = 'my-app.ini'
main_section = 'MyApp'
def __init__(self, *args, **kw):
configparser.RawConfigParser.__init__(self, *args, **kw)
self.base_dir = base_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
self.read(os.path.join(self.base_dir, self.config_filename))
self.log_level = self.opt_string(self.main_section, 'log-level', 'info').strip().upper()
self.is_debug = self.log_level=='DEBUG'
def opt_string(self, section, key, fallback=None):
return self.get(section, key) if self.has_option(section, key) else fallback
def opt_int(self, section, key, fallback=0):
return try_int(self.opt_string(section, key, str(fallback)), fallback)
app_config=AppConfig()
| 36
| 97
| 0.707854
| 796
| 0.762452
| 0
| 0
| 0
| 0
| 0
| 0
| 59
| 0.056513
|
dd25b254cf6453ad21e303d8fb8dc65ace25ddf6
| 1,131
|
py
|
Python
|
src/models/losses/corr_loss.py
|
yewzijian/RegTR
|
64e5b3f0ccc1e1a11b514eb22734959d32e0cec6
|
[
"MIT"
] | 25
|
2022-03-28T06:26:16.000Z
|
2022-03-30T14:21:24.000Z
|
src/models/losses/corr_loss.py
|
yewzijian/RegTR
|
64e5b3f0ccc1e1a11b514eb22734959d32e0cec6
|
[
"MIT"
] | null | null | null |
src/models/losses/corr_loss.py
|
yewzijian/RegTR
|
64e5b3f0ccc1e1a11b514eb22734959d32e0cec6
|
[
"MIT"
] | 2
|
2022-03-29T09:37:50.000Z
|
2022-03-30T06:26:35.000Z
|
import torch
import torch.nn as nn
from utils.se3_torch import se3_transform_list
_EPS = 1e-6
class CorrCriterion(nn.Module):
"""Correspondence Loss.
"""
def __init__(self, metric='mae'):
super().__init__()
assert metric in ['mse', 'mae']
self.metric = metric
def forward(self, kp_before, kp_warped_pred, pose_gt, overlap_weights=None):
losses = {}
B = pose_gt.shape[0]
kp_warped_gt = se3_transform_list(pose_gt, kp_before)
corr_err = torch.cat(kp_warped_pred, dim=0) - torch.cat(kp_warped_gt, dim=0)
if self.metric == 'mae':
corr_err = torch.sum(torch.abs(corr_err), dim=-1)
elif self.metric == 'mse':
corr_err = torch.sum(torch.square(corr_err), dim=-1)
else:
raise NotImplementedError
if overlap_weights is not None:
overlap_weights = torch.cat(overlap_weights)
mean_err = torch.sum(overlap_weights * corr_err) / torch.clamp_min(torch.sum(overlap_weights), _EPS)
else:
mean_err = torch.mean(corr_err, dim=1)
return mean_err
| 27.585366
| 112
| 0.625111
| 1,031
| 0.911583
| 0
| 0
| 0
| 0
| 0
| 0
| 56
| 0.049514
|
dd26b6dd687da7d2ec0ed40d629b6615e9538af8
| 501
|
py
|
Python
|
application/services/balance_service.py
|
singnet/token-balances-service
|
5e32b11bbad46e9df2820132026ab993935f8049
|
[
"MIT"
] | null | null | null |
application/services/balance_service.py
|
singnet/token-balances-service
|
5e32b11bbad46e9df2820132026ab993935f8049
|
[
"MIT"
] | 1
|
2021-04-07T14:40:02.000Z
|
2021-04-07T14:40:02.000Z
|
application/services/balance_service.py
|
singnet/token-balances-service
|
5e32b11bbad46e9df2820132026ab993935f8049
|
[
"MIT"
] | 3
|
2021-04-07T14:12:00.000Z
|
2021-04-27T07:18:34.000Z
|
from infrastructure.repository.token_snapshot_repo import TokenSnapshotRepo
from http import HTTPStatus
def get_snapshot_by_address(address):
balance = TokenSnapshotRepo().get_token_balance(address)
if balance is None:
data = None
statusCode = HTTPStatus.BAD_REQUEST.value
message = "Address not found in snapshot"
else:
data = balance
statusCode = HTTPStatus.OK.value
message = HTTPStatus.OK.phrase
return statusCode, message, data
| 27.833333
| 75
| 0.718563
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 31
| 0.061876
|
dd272efee44a376502bf4522d14dd1625b93c91b
| 5,015
|
py
|
Python
|
vaccine_allocation/TN_proj.py
|
COVID-IWG/epimargin-studies
|
7d4a78e2e6713c6a0aea2cd2440529153e9a635d
|
[
"MIT"
] | null | null | null |
vaccine_allocation/TN_proj.py
|
COVID-IWG/epimargin-studies
|
7d4a78e2e6713c6a0aea2cd2440529153e9a635d
|
[
"MIT"
] | null | null | null |
vaccine_allocation/TN_proj.py
|
COVID-IWG/epimargin-studies
|
7d4a78e2e6713c6a0aea2cd2440529153e9a635d
|
[
"MIT"
] | null | null | null |
from typing import Callable, Tuple
from epimargin.models import SIR
import pandas as pd
from epimargin.estimators import analytical_MPVS
from epimargin.etl.covid19india import data_path, get_time_series, load_all_data
import epimargin.plots as plt
from epimargin.smoothing import notched_smoothing
from epimargin.utils import cwd, weeks
from studies.vaccine_allocation.commons import *
from studies.vaccine_allocation.epi_simulations import *
from tqdm import tqdm
# model details
CI = 0.95
smoothing = 7
root = cwd()
data = root/"data"
figs = root/"figs"
data.mkdir(exist_ok=True)
figs.mkdir(exist_ok=True)
# define data versions for api files
paths = {
"v3": [data_path(i) for i in (1, 2)],
"v4": [data_path(i) for i in range(3, 26)]
}
for target in paths['v3'] + paths['v4']:
try:
download_data(data, target)
except:
pass
df = load_all_data(
v3_paths = [data/filepath for filepath in paths['v3']],
v4_paths = [data/filepath for filepath in paths['v4']]
)
# cutoff = None
# cutoff = "April 7, 2021"
cutoff = "April 14, 2021"
if cutoff:
df = df[df.date_announced <= cutoff]
data_recency = str(df["date_announced"].max()).split()[0]
run_date = str(pd.Timestamp.now()).split()[0]
ts = get_time_series(
df[df.detected_state == "Tamil Nadu"],
["detected_state", "detected_district"]
)\
.drop(columns = ["date", "time", "delta", "logdelta"])\
.rename(columns = {
"Deceased": "dD",
"Hospitalized": "dT",
"Recovered": "dR"
}).droplevel(0)\
.drop(labels = ["Other State", "Railway Quarantine", "Airport Quarantine"])
district_estimates = []
simulation_initial_conditions = pd.read_csv(data/f"all_india_coalesced_initial_conditions{simulation_start.strftime('%b%d')}.csv")\
.drop(columns = ["Unnamed: 0"])\
.set_index(["state", "district"])\
.loc["Tamil Nadu"]
def setup(district) -> Tuple[Callable[[str], SIR], pd.DataFrame]:
demographics = simulation_initial_conditions.loc[district]
dR_conf = ts.loc[district].dR
dR_conf = dR_conf.reindex(pd.date_range(dR_conf.index.min(), dR_conf.index.max()), fill_value = 0)
dR_conf_smooth = pd.Series(smooth(dR_conf), index = dR_conf.index).clip(0).astype(int)
R_conf_smooth = dR_conf_smooth.cumsum().astype(int)
R0 = R_conf_smooth[data_recency]
dD_conf = ts.loc[district].dD
dD_conf = dD_conf.reindex(pd.date_range(dD_conf.index.min(), dD_conf.index.max()), fill_value = 0)
dD_conf_smooth = pd.Series(smooth(dD_conf), index = dD_conf.index).clip(0).astype(int)
D_conf_smooth = dD_conf_smooth.cumsum().astype(int)
D0 = D_conf_smooth[data_recency]
dT_conf = ts.loc[district].dT
dT_conf = dT_conf.reindex(pd.date_range(dT_conf.index.min(), dT_conf.index.max()), fill_value = 0)
(
dates,
Rt_pred, Rt_CI_upper, Rt_CI_lower,
T_pred, T_CI_upper, T_CI_lower,
total_cases, new_cases_ts,
*_
) = analytical_MPVS(ts.loc[district].dT, CI = CI, smoothing = notched_smoothing(window = smoothing), totals = False)
Rt_estimates = pd.DataFrame(data = {
"dates" : dates,
"Rt_pred" : Rt_pred,
"Rt_CI_upper" : Rt_CI_upper,
"Rt_CI_lower" : Rt_CI_lower,
"T_pred" : T_pred,
"T_CI_upper" : T_CI_upper,
"T_CI_lower" : T_CI_lower,
"total_cases" : total_cases[2:],
"new_cases_ts": new_cases_ts,
})
dT_conf_smooth = pd.Series(smooth(dT_conf), index = dT_conf.index).clip(0).astype(int)
T_conf_smooth = dT_conf_smooth.cumsum().astype(int)
T0 = T_conf_smooth[data_recency]
dT0 = dT_conf_smooth[data_recency]
S0 = max(0, demographics.N_tot - T0)
I0 = max(0, T0 - R0 - D0)
return (
lambda seed = 0: SIR(
name = district,
mortality = demographics[[f"N_{i}" for i in range(7)]] @ np.array(list(TN_IFRs.values()))/demographics.N_tot,
population = demographics.N_tot,
random_seed = seed,
infectious_period = 10,
S0 = S0,
I0 = I0,
R0 = R0,
D0 = D0,
dT0 = dT0,
Rt0 = Rt_estimates.set_index("dates").loc[data_recency].Rt_pred * demographics.N_tot/S0),
Rt_estimates
)
district_estimates = []
for district in tqdm(simulation_initial_conditions.index.get_level_values(0).unique()):
simulation, Rt_estimates = setup(district)
district_estimates.append(Rt_estimates.assign(district = district))
Rt_estimates.to_csv(data/f"TN_Rt_data_{district}_{data_recency}_run{run_date}.csv")
projections = pd.DataFrame(
np.array(
[simulation(_).run(6 * weeks).dT for _ in range(1000)]
)).astype(int).T\
.set_index(pd.date_range(start = data_recency, freq = "D", periods = 6*weeks + 1))
print(district, projections.mean(axis = 1))
projections.to_csv(data/f"TN_projections/projections_{district}_data{data_recency}_run{run_date}.csv")
| 34.586207
| 131
| 0.656032
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 710
| 0.141575
|
dd27497c195aa372001cc5557855526c81484bc2
| 263
|
py
|
Python
|
nameyourapp/routes.py
|
WhereWeCanShare/minipy
|
485e9c4f122aa56ed8389d0ea7b5c16d59179aed
|
[
"BSD-3-Clause"
] | null | null | null |
nameyourapp/routes.py
|
WhereWeCanShare/minipy
|
485e9c4f122aa56ed8389d0ea7b5c16d59179aed
|
[
"BSD-3-Clause"
] | null | null | null |
nameyourapp/routes.py
|
WhereWeCanShare/minipy
|
485e9c4f122aa56ed8389d0ea7b5c16d59179aed
|
[
"BSD-3-Clause"
] | null | null | null |
from flask import Blueprint
main = Blueprint('main', __name__)
@main.route('/')
def main_index():
return '<div align="center"><img src="https://source.unsplash.com/1200x800/?technology,matrix,hacker,women"><p>Thanks Unsplash for nice photo</p></div>', 200
| 29.222222
| 161
| 0.714829
| 0
| 0
| 0
| 0
| 196
| 0.745247
| 0
| 0
| 154
| 0.585551
|
dd28575d99501b8ab89e76a54053a882db38d79c
| 1,514
|
py
|
Python
|
backend/db/test/id_allocator_test.py
|
xuantan/viewfinder
|
992209086d01be0ef6506f325cf89b84d374f969
|
[
"Apache-2.0"
] | 645
|
2015-01-03T02:03:59.000Z
|
2021-12-03T08:43:16.000Z
|
backend/db/test/id_allocator_test.py
|
hoowang/viewfinder
|
9caf4e75faa8070d85f605c91d4cfb52c4674588
|
[
"Apache-2.0"
] | null | null | null |
backend/db/test/id_allocator_test.py
|
hoowang/viewfinder
|
9caf4e75faa8070d85f605c91d4cfb52c4674588
|
[
"Apache-2.0"
] | 222
|
2015-01-07T05:00:52.000Z
|
2021-12-06T09:54:26.000Z
|
# Copyright 2011 Viewfinder Inc. All Rights Reserved.
"""Tests for IdAllocator data object.
"""
__author__ = 'spencer@emailscrubbed.com (Spencer Kimball)'
import unittest
from viewfinder.backend.base import util
from viewfinder.backend.base.testing import async_test
from viewfinder.backend.db.id_allocator import IdAllocator
from base_test import DBBaseTestCase
class IdAllocatorTestCase(DBBaseTestCase):
@async_test
def testCreate(self):
alloc = IdAllocator('type', 13)
num_ids = 3000
def _OnAllocated(ids):
id_set = set(ids)
assert len(id_set) == num_ids
self.stop()
with util.ArrayBarrier(_OnAllocated) as b:
[alloc.NextId(self._client, callback=b.Callback()) for i in xrange(num_ids)]
@async_test
def testMultiple(self):
"""Tests that multiple allocations from the same sequence do
not overlap.
"""
allocs = [IdAllocator('type'), IdAllocator('type')]
num_ids = 3000
def _OnAllocated(id_lists):
assert len(id_lists) == 2
id_set1 = set(id_lists[0])
id_set2 = set(id_lists[1])
assert len(id_set1) == 3000
assert len(id_set2) == 3000
assert id_set1.isdisjoint(id_set2)
self.stop()
with util.ArrayBarrier(_OnAllocated) as b:
with util.ArrayBarrier(b.Callback()) as b1:
[allocs[0].NextId(self._client, b1.Callback()) for i in xrange(num_ids)]
with util.ArrayBarrier(b.Callback()) as b2:
[allocs[1].NextId(self._client, b2.Callback()) for i in xrange(num_ids)]
| 30.28
| 82
| 0.694188
| 1,144
| 0.755614
| 0
| 0
| 1,095
| 0.72325
| 0
| 0
| 242
| 0.159841
|
dd2928863b82fbf5dba0596d90335b5ef6bbbb9b
| 2,429
|
py
|
Python
|
ayame/link.py
|
hattya/ayame
|
e8bb2b0ace79cd358b1384270cb9c5e809e12b5d
|
[
"MIT"
] | 1
|
2022-03-05T03:21:13.000Z
|
2022-03-05T03:21:13.000Z
|
ayame/link.py
|
hattya/ayame
|
e8bb2b0ace79cd358b1384270cb9c5e809e12b5d
|
[
"MIT"
] | 1
|
2021-08-25T13:41:34.000Z
|
2021-08-25T13:41:34.000Z
|
ayame/link.py
|
hattya/ayame
|
e8bb2b0ace79cd358b1384270cb9c5e809e12b5d
|
[
"MIT"
] | 1
|
2018-03-04T21:47:27.000Z
|
2018-03-04T21:47:27.000Z
|
#
# ayame.link
#
# Copyright (c) 2012-2021 Akinori Hattori <hattya@gmail.com>
#
# SPDX-License-Identifier: MIT
#
import urllib.parse
from . import core, markup, uri, util
from . import model as mm
from .exception import ComponentError
__all__ = ['Link', 'ActionLink', 'PageLink']
# HTML elements
_A = markup.QName(markup.XHTML_NS, 'a')
_LINK = markup.QName(markup.XHTML_NS, 'link')
_AREA = markup.QName(markup.XHTML_NS, 'area')
_SCRIPT = markup.QName(markup.XHTML_NS, 'script')
_STYLE = markup.QName(markup.XHTML_NS, 'style')
# HTML attributes
_HREF = markup.QName(markup.XHTML_NS, 'href')
_SRC = markup.QName(markup.XHTML_NS, 'src')
class Link(core.MarkupContainer):
def __init__(self, id, model=None):
if isinstance(model, str):
model = mm.Model(model)
super().__init__(id, model)
def on_render(self, element):
# modify attribute
attr = None
if element.qname in (_A, _LINK, _AREA):
attr = _HREF
elif element.qname in (_SCRIPT, _STYLE):
attr = _SRC
if attr is not None:
uri = self.new_uri(element.attrib.get(attr))
if uri is None:
if attr in element.attrib:
del element.attrib[attr]
else:
element.attrib[attr] = uri
# replace children by model object
body = self.model_object_as_string()
if body:
element[:] = (body,)
# render link
return super().on_render(element)
def new_uri(self, uri):
return uri
class ActionLink(Link):
def on_fire(self):
self.on_click()
def new_uri(self, _):
query = self.request.query.copy()
query[core.AYAME_PATH] = [self.path()]
environ = self.environ.copy()
environ['QUERY_STRING'] = urllib.parse.urlencode(query, doseq=True)
return uri.request_uri(environ, True)
def on_click(self):
pass
class PageLink(Link):
def __init__(self, id, page, values=None, anchor=''):
super().__init__(id, None)
if (not issubclass(page, core.Page)
or page is core.Page):
raise ComponentError(self, f"'{util.fqon_of(page)}' is not a subclass of Page")
self._page = page
self._values = values
self._anchor = anchor
def new_uri(self, uri):
return self.uri_for(self._page, self._values, self._anchor)
| 26.692308
| 91
| 0.611774
| 1,776
| 0.731165
| 0
| 0
| 0
| 0
| 0
| 0
| 343
| 0.14121
|
dd2b17b19b93b0baf9bf8e30437c71d115b7442f
| 885
|
py
|
Python
|
local/race_sim.py
|
tractiming/trac-gae
|
46c4a1fe409a45e8595210a5cf242425d40d4b41
|
[
"MIT"
] | 3
|
2020-09-13T04:56:31.000Z
|
2021-05-26T11:46:08.000Z
|
local/race_sim.py
|
tractiming/trac-gae
|
46c4a1fe409a45e8595210a5cf242425d40d4b41
|
[
"MIT"
] | null | null | null |
local/race_sim.py
|
tractiming/trac-gae
|
46c4a1fe409a45e8595210a5cf242425d40d4b41
|
[
"MIT"
] | 1
|
2020-05-09T10:05:08.000Z
|
2020-05-09T10:05:08.000Z
|
import requests
import datetime
import time
UPDATE_URL = "http://www.trac-us.appspot.com/api/updates/"
#UPDATE_URL = "http://localhost:8000/api/updates/"
def post_split(reader_code, tag_code, time):
formatted_time = time.strftime("%Y/%m/%d %H:%M:%S.%f")
payload = {'r': reader_code,
's': r"[['{0}', '{1}'],]".format(tag_code, formatted_time)}
r = requests.post(UPDATE_URL, data=payload)
print r.content
def main():
tag_str = '12345'
reader_str = 'A1010'
all_tags = ['test %04d' %i for i in range(1,51)]
while True:
for tag in all_tags:
post_split(reader_str, tag, datetime.datetime.now())
print 'splits posted ------------------------'
time.sleep(40)
#print all_tags
#post_split(reader_str, tag_str, datetime.datetime.utcnow())
if __name__ == "__main__":
main()
| 20.113636
| 74
| 0.59661
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 293
| 0.331073
|
dd2b8d0943d4247577bcc13dba218fa49f1ddda9
| 5,775
|
py
|
Python
|
classes.py
|
mattjoman/deep-RL-snake
|
c1b48ef3cb7ac0ad068887df1f60bc83a626f9d6
|
[
"MIT"
] | null | null | null |
classes.py
|
mattjoman/deep-RL-snake
|
c1b48ef3cb7ac0ad068887df1f60bc83a626f9d6
|
[
"MIT"
] | null | null | null |
classes.py
|
mattjoman/deep-RL-snake
|
c1b48ef3cb7ac0ad068887df1f60bc83a626f9d6
|
[
"MIT"
] | null | null | null |
import pygame
import numpy as np
import random
import torch
from torch import nn
from torch.nn import functional as F
class CNN(torch.nn.Module):
def __init__(self):
super(CNN, self).__init__()
torch.manual_seed(50)
self.layer1 = nn.Sequential(
# input: (1, 1, 10, 10)
# output: (1, 8, 18, 18)
nn.Conv2d(3, 32, (3, 3), stride=1),
nn.ReLU())
self.layer2 = nn.Sequential(
# input: (8, 8, 8, 8)
# output: (8, 8, 6, 6)
nn.Conv2d(32, 64, (3, 3), stride=1),
nn.ReLU())
self.layer3 = nn.Sequential(
# input: (8, 8, 6, 6)
# output: (8, 8, 4, 4)
nn.Conv2d(64, 32, (3, 3), stride=1),
nn.ReLU())
self.layer4 = nn.Sequential(
# input: (32*4*4)
nn.Linear(512, 128, bias=True),
nn.ReLU())
self.layer5 = nn.Sequential(
nn.Linear(128, 4, bias=True))
#self.optimiser = torch.optim.SGD(self.parameters(), lr=1)
self.optimiser = torch.optim.Adam(self.parameters(), lr=1)
def forward(self, x):
out = self.layer1(x.to(torch.float32))
out = self.layer2(out)
out = self.layer3(out)
out = out.view(out.size(0), -1) # flatten
out = self.layer4(out)
out = self.layer5(out)
#print(out)
return out
class Snake():
def __init__(self, rows=10, columns=10):
self.direction = 3
self.init_body(rows, columns)
self.apple = False
self.score = 0
self.timestep_counter = 0
def add_to_body(self):
if self.direction == 2:
new_head = [self.body[0][0] - 1, self.body[0][1]]
elif self.direction == 3:
new_head = [self.body[0][0] + 1, self.body[0][1]]
elif self.direction == 0:
new_head = [self.body[0][0], self.body[0][1] - 1]
else:
new_head = [self.body[0][0], self.body[0][1] + 1]
self.body.insert(0, new_head)
return
def remove_from_body(self):
del self.body[-1]
return
def move(self):
self.add_to_body()
self.timestep_counter += 1
if not self.apple:
self.remove_from_body()
else:
self.apple = False
return
def eat_apple(self):
self.apple = True
self.score += 1
return
def init_score(self):
self.score = 0
return
def init_timestep_counter(self):
self.timestep_counter = 0
return
def init_body(self, rows, columns):
self.body = [[np.random.randint(1, rows-1), np.random.randint(1, columns-1)]]
return
class Player(Snake):
def set_direction(self, keys):
if keys[pygame.K_LEFT]:
self.direction = 0 # left
elif keys[pygame.K_RIGHT]:
self.direction = 1 # right
elif keys[pygame.K_UP]:
self.direction = 2 # up
elif keys[pygame.K_DOWN]:
self.direction = 3 # down
return
class AI(Snake):
def __init__(self):
super().__init__()
self.epsilon = 0.1
self.gamma = 0.3
self.Q_net = CNN()
self.target_Q_net = self.Q_net
self.replay_mem = []
self.replay_mem_limit = 500
self.batch_size = 64
self.game_count = 0
def set_direction(self, state):
Q_vals = self.Q_net.forward(torch.from_numpy(state))
self.direction, _ = self.select_action(Q_vals)
return
def select_action(self, Q_vals):
""" Returns the action selected and Q vals for each action """
max_ = Q_vals.max().item()
for i in range(4):
if Q_vals[0][i].item() == max_:
greedy_direction = i
random_num = np.random.uniform(0, 1)
self.epsilon = 1 / (self.game_count ** (1/2.5))
if random_num > self.epsilon:
return greedy_direction, max_
else:
return np.random.random_integers(0, 3), Q_vals[0][i].item()
def learn_from_mem(self):
if self.timestep_counter % 5 == 0:
self.target_Q_net = self.Q_net
if len(self.replay_mem) < self.batch_size:
return
for b in range(self.batch_size):
mem = self.select_mem()
reward = mem[2]
Q_0_vals = self.Q_net.forward(torch.from_numpy(mem[0]))
Q_1_vals = self.target_Q_net.forward(torch.from_numpy(mem[3]))
Q_0 = Q_0_vals[0][mem[1]] # get Q val for the action taken
Q_1 = Q_1_vals.max().detach() # get the maximum Q val for the next state
loss = F.smooth_l1_loss(Q_0, (self.gamma * Q_1) + reward)
self.Q_net.optimiser.zero_grad()
loss.backward()
for param in self.Q_net.parameters():
param.grad.data.clamp_(-1, 1) # do we need to clamp?
self.Q_net.optimiser.step()
return
def update_replay_mem(self, s0, a0, r, s1):
if len(self.replay_mem) >= self.replay_mem_limit:
del self.replay_mem[0]
self.replay_mem.append([s0, a0, r, s1])
return
def select_mem(self):
index = np.random.random_integers(0, len(self.replay_mem)-1)
return self.replay_mem[index]
class Apple():
def __init__(self, rows, columns):
self.set_loc(rows, columns)
def set_loc(self, rows, columns):
self.loc = [random.randint(1, rows-2), random.randint(1, columns-2)]
return
if __name__ == "__main__":
ai = AI()
state = np.random.rand(20, 20)
ai.set_direction(state)
print(ai.direction)
print(ai.body[0])
ai.move()
print(ai.body[0])
| 26.612903
| 86
| 0.546667
| 5,435
| 0.941126
| 0
| 0
| 0
| 0
| 0
| 0
| 419
| 0.072554
|
dd2f7e3c6dec1485c37b66c19a901c087ee62101
| 1,289
|
py
|
Python
|
examples/basic/turnOffLight_fb.py
|
quangthanh010290/EchoSkills
|
b90379cddb034a1e379fef77ca48c583bd6e47d5
|
[
"MIT"
] | null | null | null |
examples/basic/turnOffLight_fb.py
|
quangthanh010290/EchoSkills
|
b90379cddb034a1e379fef77ca48c583bd6e47d5
|
[
"MIT"
] | null | null | null |
examples/basic/turnOffLight_fb.py
|
quangthanh010290/EchoSkills
|
b90379cddb034a1e379fef77ca48c583bd6e47d5
|
[
"MIT"
] | null | null | null |
import logging
from random import randint
from flask import Flask, render_template
from flask_ask import Ask, statement, question, session
from firebase import firebase
import time
firebase = firebase.FirebaseApplication('https://iotdev-6b58b.firebaseio.com', None)
app = Flask(__name__)
ask = Ask(app, "/")
#logging.getLogger("flask_ask").setLevel(logging.DEBUG)
@ask.launch
def launch():
return question("Which room you want to turn off?")
@ask.intent("RoomIntent")
def intent_fcn(roomName):
msg = "Ligh on {} is turned off".format(roomName)
if roomName == "kitchen":
print 'Light on kichen room is turned off'
firebase.put('/devices/8795002','status',False)
if roomName == "living room":
print 'Light on living room is turn off'
firebase.put('/devices/390650','status',False)
if roomName == 'bathroom':
print 'Light on bath room is turn off'
firebase.put('/devices/390192','status',False)
return statement(msg)
@ask.intent("AMAZON.YesIntent")
def yes_fcn(roomName):
msg = "Ligh on {} is turned off".format(roomName)
print roomName
firebase.put('/123','states/001',False)
return statement(msg)
def stop():
return statement("Stoped")
if __name__ == '__main__':
app.run(debug=False,host = '0.0.0.0', port=7000)
| 22.614035
| 84
| 0.703646
| 0
| 0
| 0
| 0
| 782
| 0.606672
| 0
| 0
| 466
| 0.361521
|
dd333037440197eebf8dccb7cbf70627b566e58c
| 763
|
py
|
Python
|
home_app/migrations/0003_auto_20200412_0206.py
|
Ymirrp/Home-page
|
6ac9b5b76cc2b08298086c7e784685dad802c9d6
|
[
"MIT"
] | null | null | null |
home_app/migrations/0003_auto_20200412_0206.py
|
Ymirrp/Home-page
|
6ac9b5b76cc2b08298086c7e784685dad802c9d6
|
[
"MIT"
] | 7
|
2020-04-18T04:54:05.000Z
|
2020-04-29T14:49:46.000Z
|
home_app/migrations/0003_auto_20200412_0206.py
|
Ymirrp/Home-page
|
6ac9b5b76cc2b08298086c7e784685dad802c9d6
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.5 on 2020-04-12 02:06
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('home_app', '0002_auto_20200408_2205'),
]
operations = [
migrations.RemoveField(
model_name='topsites',
name='order_no',
),
migrations.AlterField(
model_name='topsites',
name='categories',
field=models.CharField(choices=[('HOBB', 'Áhugamál'), ('BOOK', 'Bókamerki'), ('NEWS', 'Fréttir'), ('SPOR', 'Íþróttir'), ('GAME', 'Leikir'), ('EDUC', 'Nám'), ('SOCI', 'Samfélagsmiðlar'), ('SHOP', 'Versla'), ('WORK', 'Vinna'), ('FAVO', 'Uppáhalds')], default='FAVO', max_length=4),
),
]
| 33.173913
| 292
| 0.55308
| 675
| 0.872093
| 0
| 0
| 0
| 0
| 0
| 0
| 298
| 0.385013
|
dd341ee91f7a33f3e372a0311daf89a77f9a9148
| 349
|
py
|
Python
|
tests/test_license_screening.py
|
sthagen/python-scaling-tribble
|
2bb2e41185ae2b0108f341751d0e4a2187909683
|
[
"MIT"
] | null | null | null |
tests/test_license_screening.py
|
sthagen/python-scaling-tribble
|
2bb2e41185ae2b0108f341751d0e4a2187909683
|
[
"MIT"
] | 18
|
2021-02-14T15:17:17.000Z
|
2021-02-14T17:46:27.000Z
|
tests/test_license_screening.py
|
sthagen/python-scaling-tribble
|
2bb2e41185ae2b0108f341751d0e4a2187909683
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# pylint: disable=missing-docstring,unused-import,reimported
import pytest # type: ignore
import tests.context as ctx
import license_screening.license_screening as lis
def test_parse_ok_empty_string():
assert lis.parse('') is NotImplemented
def test_parse_ok_known_tree():
assert lis.main(["tests/data"]) == 0
| 21.8125
| 60
| 0.744986
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 111
| 0.318052
|
dd3452c9bed80f5adb210957aa5ef5db051c0f6c
| 301
|
py
|
Python
|
ml_progress/percent_metric.py
|
gregrolwes/ml_progress
|
e352f73847e163993ff9e642973512f070555805
|
[
"MIT"
] | null | null | null |
ml_progress/percent_metric.py
|
gregrolwes/ml_progress
|
e352f73847e163993ff9e642973512f070555805
|
[
"MIT"
] | null | null | null |
ml_progress/percent_metric.py
|
gregrolwes/ml_progress
|
e352f73847e163993ff9e642973512f070555805
|
[
"MIT"
] | null | null | null |
import sys
from .display import Display
class PercentMetric(Display):
def __init__(self, metric: str):
super().__init__()
self.metric = metric
def update(self, metrics: dict, width=25, height=1):
sys.stdout.write("%s: %3d%% \n" % (self.metric, metrics[self.metric]))
| 27.363636
| 78
| 0.644518
| 259
| 0.860465
| 0
| 0
| 0
| 0
| 0
| 0
| 14
| 0.046512
|
dd34c031db159b934c285da9deacefad0961aecf
| 762
|
py
|
Python
|
src/server/alembic/versions/6b8cf99be000_add_user_journal_table.py
|
princessruthie/paws-data-pipeline
|
6f7095f99b9ad31b0171b256cf18849d63445c91
|
[
"MIT"
] | 27
|
2019-11-20T20:20:30.000Z
|
2022-01-31T17:24:55.000Z
|
src/server/alembic/versions/6b8cf99be000_add_user_journal_table.py
|
mrcrnkovich/paws-data-pipeline
|
7c0bd4c5f23276f541611cb564f2f5abbb6b9887
|
[
"MIT"
] | 348
|
2019-11-26T20:34:02.000Z
|
2022-02-27T20:28:20.000Z
|
src/server/alembic/versions/6b8cf99be000_add_user_journal_table.py
|
mrcrnkovich/paws-data-pipeline
|
7c0bd4c5f23276f541611cb564f2f5abbb6b9887
|
[
"MIT"
] | 20
|
2019-12-03T23:50:33.000Z
|
2022-02-09T18:38:25.000Z
|
"""Add user journal table
Revision ID: 6b8cf99be000
Revises: 36c4ecbfd11a
Create Date: 2020-12-21 15:08:07.784568
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import func
# revision identifiers, used by Alembic.
revision = "6b8cf99be000"
down_revision = "36c4ecbfd11a"
branch_labels = None
depends_on = None
def upgrade():
op.create_table(
"pdp_user_journal",
sa.Column("_id", sa.Integer, primary_key=True),
sa.Column("stamp", sa.DateTime, nullable=False, server_default=func.now()),
sa.Column("username", sa.String(50), nullable=False),
sa.Column("event_type", sa.String(50)),
sa.Column("detail", sa.String(120)),
)
def downgrade():
op.drop_table('pdp_user_journal')
| 23.090909
| 83
| 0.692913
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 265
| 0.347769
|
dd368cbbf1f2713371fc20b46be0df6fde83d872
| 1,906
|
py
|
Python
|
Python/WearherTelegram/weatherbot.py
|
OnCode-channel/OnCode
|
4aa7022932bc5aece39121233b34ebea12063717
|
[
"CC0-1.0"
] | 3
|
2021-11-21T05:09:45.000Z
|
2021-11-21T09:55:02.000Z
|
Python/WearherTelegram/weatherbot.py
|
OnCode-channel/OnCode
|
4aa7022932bc5aece39121233b34ebea12063717
|
[
"CC0-1.0"
] | null | null | null |
Python/WearherTelegram/weatherbot.py
|
OnCode-channel/OnCode
|
4aa7022932bc5aece39121233b34ebea12063717
|
[
"CC0-1.0"
] | 1
|
2022-03-16T20:34:29.000Z
|
2022-03-16T20:34:29.000Z
|
import telebot
from pyowm import OWM
from pyowm.utils.config import get_default_config
bot = telebot.TeleBot("telegram API-key")
@bot.message_handler(commands=['start'])
def welcome(message):
bot.send_message(message.chat.id, 'Добро пожаловать, ' + str(message.from_user.first_name) + ',\n/start - запуск бота\n/help - команды бота\n/credits - автор бота\nЧтобы узнать погоду напишите в чат название города')
@bot.message_handler(commands=['help'])
def help(message):
bot.send_message(message.chat.id, '/start - запуск бота\n/help - команды бота\n/credits - автор бота\nЧтобы узнать погоду напишите в чат название города')
@bot.message_handler(content_types=['text'])
def test(message):
try:
place = message.text
config_dict = get_default_config()
config_dict['language'] = 'ru'
owm = OWM('owm api-key', config_dict)
mgr = owm.weather_manager()
observation = mgr.weather_at_place(place)
w = observation.weather
t = w.temperature("celsius")
t1 = t['temp']
t2 = t['feels_like']
t3 = t['temp_max']
t4 = t['temp_min']
wi = w.wind()['speed']
humi = w.humidity
cl = w.clouds
st = w.status
dt = w.detailed_status
ti = w.reference_time('iso')
pr = w.pressure['press']
vd = w.visibility_distance
bot.send_message(message.chat.id, "В городе " + str(place) + " температура " + str(t1) + " °C" + "\n" +
"Максимальная температура " + str(t3) + " °C" +"\n" +
"Минимальная температура " + str(t4) + " °C" + "\n" +
"Ощущается как" + str(t2) + " °C" + "\n" +
"Скорость ветра " + str(wi) + " м/с" + "\n" +
"Давление " + str(pr) + " мм.рт.ст" + "\n" +
"Влажность " + str(humi) + " %" + "\n" +
"Видимость " + str(vd) + " метров" + "\n" +
"Описание " + str(st) + "\n\n" + str(dt))
except:
bot.send_message(message.chat.id,"Такой город не найден!")
print(str(message.text),"- не найден")
bot.polling(none_stop=True, interval=0)
| 32.862069
| 217
| 0.64638
| 0
| 0
| 0
| 0
| 2,056
| 0.920734
| 0
| 0
| 1,001
| 0.448276
|
dd37cbcc06fe382942dc56ae1ef81d1193794b23
| 17
|
py
|
Python
|
appium/version.py
|
zgq346712481/python-client
|
1c179d2c84c76bbed33e6333f381cc8a86f3bb82
|
[
"Apache-2.0"
] | 1
|
2019-03-07T00:53:43.000Z
|
2019-03-07T00:53:43.000Z
|
appium/version.py
|
zgq346712481/python-client
|
1c179d2c84c76bbed33e6333f381cc8a86f3bb82
|
[
"Apache-2.0"
] | null | null | null |
appium/version.py
|
zgq346712481/python-client
|
1c179d2c84c76bbed33e6333f381cc8a86f3bb82
|
[
"Apache-2.0"
] | null | null | null |
version = '0.38'
| 8.5
| 16
| 0.588235
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6
| 0.352941
|
dd3bf5910219efb966cd48792f7c68202d3a54a8
| 130
|
py
|
Python
|
earth_enterprise/src/server/wsgi/serve/push/stream/__init__.py
|
ezeeyahoo/earthenterprise
|
b6cac9e6228946f2f17d1edb75e118aeb3e8e8c9
|
[
"Apache-2.0"
] | 2,661
|
2017-03-20T22:12:50.000Z
|
2022-03-30T09:43:19.000Z
|
earth_enterprise/src/server/wsgi/serve/push/stream/__init__.py
|
ezeeyahoo/earthenterprise
|
b6cac9e6228946f2f17d1edb75e118aeb3e8e8c9
|
[
"Apache-2.0"
] | 1,531
|
2017-03-24T17:20:32.000Z
|
2022-03-16T18:11:14.000Z
|
earth_enterprise/src/server/wsgi/serve/push/stream/__init__.py
|
ezeeyahoo/earthenterprise
|
b6cac9e6228946f2f17d1edb75e118aeb3e8e8c9
|
[
"Apache-2.0"
] | 990
|
2017-03-24T11:54:28.000Z
|
2022-03-22T11:51:47.000Z
|
"""The package indicator for serve.push.stream.
Modules for transferring and registering servable stream data on the server.
"""
| 26
| 76
| 0.784615
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 129
| 0.992308
|
dd3c2fd0cab5262a11100e98979113701b2c65bd
| 2,828
|
py
|
Python
|
Projects_Django/MyPets/tienda_mascotas/models.py
|
JuanCasanovaQ/Frameworks_8A
|
353838473b275d366cac8364ef4df72578c0aed8
|
[
"Apache-2.0"
] | null | null | null |
Projects_Django/MyPets/tienda_mascotas/models.py
|
JuanCasanovaQ/Frameworks_8A
|
353838473b275d366cac8364ef4df72578c0aed8
|
[
"Apache-2.0"
] | null | null | null |
Projects_Django/MyPets/tienda_mascotas/models.py
|
JuanCasanovaQ/Frameworks_8A
|
353838473b275d366cac8364ef4df72578c0aed8
|
[
"Apache-2.0"
] | null | null | null |
from django.db import models
from django.db.models.fields.related import ForeignKey
# Create your models here.
class Country(models.Model):
code = models.CharField(max_length=10)
name = models.CharField(max_length=150)
abrev = models.CharField(max_length=4)
status = models.BooleanField()
created_at = models.DateField(auto_now=True)
updatesd_at = models.DateField()
deleted_at = models.DateField()
class User(models.Model):
first_name = models.CharField(max_length=200)
last_name = models.CharField(max_length=200)
id_identification_type = models.IntegerField()
number_id = models.CharField(max_length=15)
id_city = models.IntegerField()
email = models.CharField(max_length=200)
password = models.CharField(max_length=200)
status = models.BooleanField()
created_at = models.DateField(auto_now=True)
updatesd_at = models.DateField()
deleted_at = models.DateField()
class Session(models.Model):
id_user = models.IntegerField()
ip = models.CharField(max_length=200)
status = models.BooleanField()
created_at = models.DateField(auto_now=True)
updatesd_at = models.DateField()
deleted_at = models.DateField()
class Identification_type(models.Model):
type = models.CharField(max_length=150)
abrev = models.CharField(max_length=4)
created_at = models.DateField(auto_now=True)
updatesd_at = models.DateField()
deleted_at = models.DateField()
class City(models.Model):
code = models.CharField(max_length=10)
name = models.CharField(max_length=150)
abrev = models.CharField(max_length=4)
id_country = models.IntegerField()
status = models.BooleanField()
created_at = models.DateField(auto_now=True)
updatesd_at = models.DateField()
deleted_at = models.DateField()
class Pet(models.Model):
code = models.CharField(max_length=10)
name = models.CharField(max_length=150)
id_user = models.IntegerField()
id_type = models.IntegerField()
id_race = models.IntegerField()
status = models.BooleanField()
created_at = models.DateField(auto_now=True)
updatesd_at = models.DateField()
deleted_at = models.DateField()
class Type(models.Model):
code = models.CharField(max_length=10)
name = models.CharField(max_length=150)
abrev = models.CharField(max_length=4)
status = models.BooleanField()
created_at = models.DateField(auto_now=True)
updatesd_at = models.DateField()
deleted_at = models.DateField()
class Race(models.Model):
code = models.CharField(max_length=10)
name = models.CharField(max_length=150)
abrev = models.CharField(max_length=4)
status = models.BooleanField()
created_at = models.DateField(auto_now=True)
updatesd_at = models.DateField()
deleted_at = models.DateField()
| 32.883721
| 54
| 0.724187
| 2,691
| 0.951556
| 0
| 0
| 0
| 0
| 0
| 0
| 26
| 0.009194
|
dd3c339456c728831d00962485678bd366a32bd9
| 34
|
py
|
Python
|
homeassistant/components/limitlessled/__init__.py
|
domwillcode/home-assistant
|
f170c80bea70c939c098b5c88320a1c789858958
|
[
"Apache-2.0"
] | 30,023
|
2016-04-13T10:17:53.000Z
|
2020-03-02T12:56:31.000Z
|
homeassistant/components/limitlessled/__init__.py
|
jagadeeshvenkatesh/core
|
1bd982668449815fee2105478569f8e4b5670add
|
[
"Apache-2.0"
] | 31,101
|
2020-03-02T13:00:16.000Z
|
2022-03-31T23:57:36.000Z
|
homeassistant/components/limitlessled/__init__.py
|
jagadeeshvenkatesh/core
|
1bd982668449815fee2105478569f8e4b5670add
|
[
"Apache-2.0"
] | 11,956
|
2016-04-13T18:42:31.000Z
|
2020-03-02T09:32:12.000Z
|
"""The limitlessled component."""
| 17
| 33
| 0.705882
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 33
| 0.970588
|
dd3c5ef2c1c57128342b4cbe674344dc894fe7e9
| 14,427
|
py
|
Python
|
projectroles/app_settings.py
|
olgabot/sodar_core
|
2a012c962c763fe970261839226e848d752d14d5
|
[
"MIT"
] | null | null | null |
projectroles/app_settings.py
|
olgabot/sodar_core
|
2a012c962c763fe970261839226e848d752d14d5
|
[
"MIT"
] | null | null | null |
projectroles/app_settings.py
|
olgabot/sodar_core
|
2a012c962c763fe970261839226e848d752d14d5
|
[
"MIT"
] | null | null | null |
"""Project and user settings API"""
import json
from projectroles.models import AppSetting, APP_SETTING_TYPES, SODAR_CONSTANTS
from projectroles.plugins import get_app_plugin, get_active_plugins
# SODAR constants
APP_SETTING_SCOPE_PROJECT = SODAR_CONSTANTS['APP_SETTING_SCOPE_PROJECT']
APP_SETTING_SCOPE_USER = SODAR_CONSTANTS['APP_SETTING_SCOPE_USER']
APP_SETTING_SCOPE_PROJECT_USER = SODAR_CONSTANTS[
'APP_SETTING_SCOPE_PROJECT_USER'
]
# Local constants
VALID_SCOPES = [
APP_SETTING_SCOPE_PROJECT,
APP_SETTING_SCOPE_USER,
APP_SETTING_SCOPE_PROJECT_USER,
]
class AppSettingAPI:
@classmethod
def _check_project_and_user(cls, scope, project, user):
"""
Ensure one of the project and user parameters is set.
:param scope: Scope of Setting (USER, PROJECT, PROJECT_USER)
:param project: Project object
:param user: User object
:raise: ValueError if none or both objects exist
"""
if scope == APP_SETTING_SCOPE_PROJECT:
if not project:
raise ValueError('Project unset for setting with project scope')
if user:
raise ValueError('User set for setting with project scope')
elif scope == APP_SETTING_SCOPE_USER:
if project:
raise ValueError('Project set for setting with user scope')
if not user:
raise ValueError('User unset for setting with user scope')
elif scope == APP_SETTING_SCOPE_PROJECT_USER:
if not project:
raise ValueError(
'Project unset for setting with project_user scope'
)
if not user:
raise ValueError(
'User unset for setting with project_user scope'
)
@classmethod
def _check_scope(cls, scope):
"""
Ensure the validity of a scope definition.
:param scope: String
:raise: ValueError if scope is not recognized
"""
if scope not in VALID_SCOPES:
raise ValueError('Invalid scope "{}"'.format(scope))
@classmethod
def _get_json_value(cls, value):
"""
Return JSON value as dict regardless of input type
:param value: Original value (string or dict)
:raise: json.decoder.JSONDecodeError if string value is not valid JSON
:raise: ValueError if value type is not recognized or if value is not
valid JSON
:return: dict
"""
if not value:
return {}
try:
if isinstance(value, str):
return json.loads(value)
else:
json.dumps(value) # Ensure this is valid
return value
except Exception:
raise ValueError('Value is not valid JSON: {}'.format(value))
@classmethod
def _compare_value(cls, setting_obj, input_value):
"""
Compare input value to value in an AppSetting object
:param setting_obj: AppSetting object
:param input_value: Input value (string, int, bool or dict)
:return: Bool
"""
if setting_obj.type == 'JSON':
return setting_obj.value_json == cls._get_json_value(input_value)
elif setting_obj.type == 'BOOLEAN':
# TODO: Also do conversion on input value here if necessary
return bool(int(setting_obj.value)) == input_value
return setting_obj.value == str(input_value)
@classmethod
def get_default_setting(cls, app_name, setting_name, post_safe=False):
"""
Get default setting value from an app plugin.
:param app_name: App name (string, must correspond to "name" in app
plugin)
:param setting_name: Setting name (string)
:param post_safe: Whether a POST safe value should be returned (bool)
:return: Setting value (string, integer or boolean)
:raise: KeyError if nothing is found with setting_name
"""
app_plugin = get_app_plugin(app_name)
if setting_name in app_plugin.app_settings:
if (
post_safe
and app_plugin.app_settings[setting_name]['type'] == 'JSON'
):
return json.dumps(
app_plugin.app_settings[setting_name]['default']
)
return app_plugin.app_settings[setting_name]['default']
raise KeyError(
'Setting "{}" not found in app plugin "{}"'.format(
setting_name, app_name
)
)
@classmethod
def get_app_setting(
cls, app_name, setting_name, project=None, user=None, post_safe=False
):
"""
        Return app setting value for a project or a user. If not set, return
        the default.
:param app_name: App name (string, must correspond to "name" in app
plugin)
:param setting_name: Setting name (string)
:param project: Project object (can be None)
:param user: User object (can be None)
:param post_safe: Whether a POST safe value should be returned (bool)
:return: String or None
:raise: KeyError if nothing is found with setting_name
"""
try:
val = AppSetting.objects.get_setting_value(
app_name, setting_name, project=project, user=user
)
except AppSetting.DoesNotExist:
val = cls.get_default_setting(app_name, setting_name, post_safe)
# Handle post_safe for dict values (JSON)
if post_safe and isinstance(val, dict):
return json.dumps(val)
return val
@classmethod
def get_all_settings(cls, project=None, user=None, post_safe=False):
"""
Return all setting values. If the value is not found, return
the default.
:param project: Project object (can be None)
:param user: User object (can be None)
:param post_safe: Whether POST safe values should be returned (bool)
:return: Dict
:raise: ValueError if neither project nor user are set
"""
if not project and not user:
raise ValueError('Project and user are both unset')
ret = {}
app_plugins = get_active_plugins()
for plugin in app_plugins:
p_settings = cls.get_setting_defs(
APP_SETTING_SCOPE_PROJECT, plugin=plugin
)
for s_key in p_settings:
ret[
'settings.{}.{}'.format(plugin.name, s_key)
] = cls.get_app_setting(
plugin.name, s_key, project, user, post_safe
)
return ret
@classmethod
def get_all_defaults(cls, scope, post_safe=False):
"""
Get all default settings for a scope.
:param scope: Setting scope (PROJECT, USER or PROJECT_USER)
:param post_safe: Whether POST safe values should be returned (bool)
:return: Dict
"""
cls._check_scope(scope)
ret = {}
app_plugins = get_active_plugins()
for plugin in app_plugins:
p_settings = cls.get_setting_defs(scope, plugin=plugin)
for s_key in p_settings:
ret[
'settings.{}.{}'.format(plugin.name, s_key)
] = cls.get_default_setting(plugin.name, s_key, post_safe)
return ret
@classmethod
def set_app_setting(
cls,
app_name,
setting_name,
value,
project=None,
user=None,
validate=True,
):
"""
        Set the value of an existing project or user setting. Create the object
        if not found.
:param app_name: App name (string, must correspond to "name" in app
plugin)
:param setting_name: Setting name (string)
:param value: Value to be set
:param project: Project object (can be None)
:param user: User object (can be None)
:param validate: Validate value (bool, default=True)
:return: True if changed, False if not changed
:raise: ValueError if validating and value is not accepted for setting
type
:raise: ValueError if neither project nor user are set
:raise: KeyError if setting name is not found in plugin specification
"""
if not project and not user:
raise ValueError('Project and user are both unset')
try:
setting = AppSetting.objects.get(
app_plugin__name=app_name,
name=setting_name,
project=project,
user=user,
)
if cls._compare_value(setting, value):
return False
if validate:
cls.validate_setting(setting.type, value)
if setting.type == 'JSON':
setting.value_json = cls._get_json_value(value)
else:
setting.value = value
setting.save()
return True
except AppSetting.DoesNotExist:
app_plugin = get_app_plugin(app_name)
if setting_name not in app_plugin.app_settings:
raise KeyError(
'Setting "{}" not found in app plugin "{}"'.format(
setting_name, app_name
)
)
s_def = app_plugin.app_settings[setting_name]
s_type = s_def['type']
s_mod = (
bool(s_def['user_modifiable'])
if 'user_modifiable' in s_def
else True
)
cls._check_scope(s_def['scope'])
cls._check_project_and_user(s_def['scope'], project, user)
if validate:
v = cls._get_json_value(value) if s_type == 'JSON' else value
cls.validate_setting(s_type, v)
s_vals = {
'app_plugin': app_plugin.get_model(),
'project': project,
'user': user,
'name': setting_name,
'type': s_type,
'user_modifiable': s_mod,
}
if s_type == 'JSON':
s_vals['value_json'] = cls._get_json_value(value)
else:
s_vals['value'] = value
AppSetting.objects.create(**s_vals)
return True
@classmethod
def validate_setting(cls, setting_type, setting_value):
"""
Validate setting value according to its type.
:param setting_type: Setting type
:param setting_value: Setting value
:raise: ValueError if setting_type or setting_value is invalid
"""
if setting_type not in APP_SETTING_TYPES:
raise ValueError('Invalid setting type "{}"'.format(setting_type))
elif setting_type == 'BOOLEAN':
if not isinstance(setting_value, bool):
raise ValueError(
'Please enter a valid boolean value ({})'.format(
setting_value
)
)
elif setting_type == 'INTEGER':
if (
not isinstance(setting_value, int)
and not str(setting_value).isdigit()
):
raise ValueError(
'Please enter a valid integer value ({})'.format(
setting_value
)
)
elif setting_type == 'JSON':
try:
json.dumps(setting_value)
except TypeError:
raise ValueError(
'Please enter valid JSON ({})'.format(setting_value)
)
return True
@classmethod
def get_setting_def(cls, name, plugin=None, app_name=None):
"""
Return definition for a single app setting, either based on an app name
or the plugin object.
:param name: Setting name
:param plugin: Plugin object extending ProjectAppPluginPoint
:param app_name: Name of the app plugin (string)
:return: Dict
        :raise: ValueError if neither app_name nor plugin is set or if setting
is not found in plugin
"""
if not plugin and not app_name:
raise ValueError('Plugin and app name both unset')
elif not plugin:
plugin = get_app_plugin(app_name)
if not plugin:
raise ValueError(
'Plugin not found with app name "{}"'.format(app_name)
)
if name not in plugin.app_settings:
raise ValueError(
'App setting not found in app "{}" with name "{}"'.format(
plugin.name, name
)
)
return plugin.app_settings[name]
@classmethod
def get_setting_defs(
cls, scope, plugin=False, app_name=False, user_modifiable=False
):
"""
Return app setting definitions of a specific scope from a plugin.
:param scope: PROJECT, USER or PROJECT_USER
:param plugin: project app plugin object extending ProjectAppPluginPoint
:param app_name: Name of the app plugin (string)
:param user_modifiable: Only return modifiable settings if True
(boolean)
:return: Dict
        :raise: ValueError if scope is invalid or if neither app_name nor
plugin are set
"""
if not plugin and not app_name:
raise ValueError('Plugin and app name both unset')
if not plugin:
plugin = get_app_plugin(app_name)
if not plugin:
raise ValueError(
'Plugin not found with app name "{}"'.format(app_name)
)
cls._check_scope(scope)
return {
k: v
for k, v in plugin.app_settings.items()
if (
'scope' in v
and v['scope'] == scope
and (
not user_modifiable
or (
'user_modifiable' not in v
or v['user_modifiable'] is True
)
)
)
}
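# A minimal usage sketch of the API above, kept as comments so the module's
# behaviour is unchanged. The plugin name 'example_app', the setting name
# 'allow_public' and the `project` object are hypothetical placeholders that
# only illustrate the call pattern; they are not defined by this module.
#
#   value = AppSettingAPI.get_app_setting(
#       'example_app', 'allow_public', project=project
#   )
#   AppSettingAPI.set_app_setting(
#       'example_app', 'allow_public', True, project=project
#   )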
| 32.938356
| 80
| 0.561932
| 13,846
| 0.959728
| 0
| 0
| 13,755
| 0.953421
| 0
| 0
| 5,858
| 0.406044
|
dd3d84abfc37890e97980406a58c52b188bedbc3
| 2,835
|
py
|
Python
|
util/2mass_catalog.py
|
spake/astrometry.net
|
12c76f4a44fe90a009eeb962f2ae28b0791829b8
|
[
"BSD-3-Clause"
] | 4
|
2018-02-13T23:11:40.000Z
|
2021-09-30T16:02:22.000Z
|
util/2mass_catalog.py
|
spake/astrometry.net
|
12c76f4a44fe90a009eeb962f2ae28b0791829b8
|
[
"BSD-3-Clause"
] | null | null | null |
util/2mass_catalog.py
|
spake/astrometry.net
|
12c76f4a44fe90a009eeb962f2ae28b0791829b8
|
[
"BSD-3-Clause"
] | 1
|
2019-02-11T06:56:30.000Z
|
2019-02-11T06:56:30.000Z
|
#! /usr/bin/env python
# Licensed under a 3-clause BSD style license - see LICENSE
from __future__ import print_function
import sys
from optparse import OptionParser
try:
import pyfits
except ImportError:
try:
from astropy.io import fits as pyfits
except ImportError:
raise ImportError("Cannot import either pyfits or astropy.io.fits")
from numpy import *
from astrometry.util.fits import *
from astrometry.util.healpix import *
from astrometry.util.starutil_numpy import *
def get_2mass_sources(ra, dec, radius=1, basefn=None):
twomass_nside = 9
if basefn is None:
twomass_pat = '2mass_hp%03i.fits'
else:
twomass_pat = basefn
hps = healpix_rangesearch(ra, dec, radius, twomass_nside)
print('2MASS healpixes in range:', hps)
allU = None
for hp in hps:
fn = twomass_pat % hp
print('2MASS filename:', fn)
U = fits_table(fn)
print(len(U), 'sources')
I = (degrees_between(ra, dec, U.ra, U.dec) < radius)
print('%i 2MASS stars within range.' % sum(I))
U = U[I]
if allU is None:
allU = U
else:
allU.append(U)
return allU
if __name__ == '__main__':
parser = OptionParser(usage='%prog [options] <ra> <dec> <output-filename>')
parser.add_option('-r', dest='radius', type='float', help='Search radius, in deg (default 1 deg)')
parser.add_option('-b', dest='basefn', help='Base filename of 2MASS FITS files (default: 2mass_hp%03i.fits)')
parser.add_option('-B', dest='band', help='Band (J, H, or K) to use for cuts')
parser.set_defaults(radius=1.0, basefn=None, band='J')
(opt, args) = parser.parse_args()
if len(args) != 3:
parser.print_help()
print()
print('Got extra arguments:', args)
sys.exit(-1)
# parse RA,Dec.
ra = float(args[0])
dec = float(args[1])
outfn = args[2]
band = opt.band.lower()
# ugh!
opts = {}
for k in ['radius', 'basefn']:
opts[k] = getattr(opt, k)
X = get_2mass_sources(ra, dec, **opts)
print('Got %i 2MASS sources.' % len(X))
#print X.about()
print('Applying cuts...')
I = logical_not(X.minor_planet)
print('not minor planet:', sum(I))
qual = X.get(band + '_quality')
# work around dumb bug where it's a single-char column rather than a byte.
nobrightness = chr(0)
I = logical_and(I, (qual != nobrightness))
print('not NO_BRIGHTNESS', sum(I))
print(len(X))
print(len(X.j_cc))
cc = array(X.getcolumn(band + '_cc'))
ccnone = chr(0)
#print 'cc shape', cc.shape
#print cc[:10]
#print ccnone
I = logical_and(I, (cc == ccnone))
print('CC_NONE', sum(I))
X = X[I]
print('%i pass cuts' % len(X))
print('Writing to', outfn)
X.write_to(outfn)
| 28.35
| 113
| 0.610229
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 801
| 0.28254
|
dd3f4f79ce1d8a927e706c3ca5d870ec9910cd7c
| 682
|
py
|
Python
|
models/nicknames.py
|
Tyson-Chicken-Nuggets/me-discord-leaderboard
|
d0e04c77e4f7a309cbb6315d24bd47929ba4ec54
|
[
"MIT"
] | 4
|
2018-12-13T04:15:26.000Z
|
2021-02-15T21:46:59.000Z
|
models/nicknames.py
|
Tyson-Chicken-Nuggets/me-discord-leaderboard
|
d0e04c77e4f7a309cbb6315d24bd47929ba4ec54
|
[
"MIT"
] | 2
|
2019-05-17T18:47:18.000Z
|
2020-09-26T01:31:39.000Z
|
models/nicknames.py
|
Tyson-Chicken-Nuggets/me-discord-leaderboard
|
d0e04c77e4f7a309cbb6315d24bd47929ba4ec54
|
[
"MIT"
] | 1
|
2018-06-08T17:08:29.000Z
|
2018-06-08T17:08:29.000Z
|
from sqlalchemy import Column, String, Integer, ForeignKey
from sqlalchemy.orm import relationship
from models.base import Base
from models.servers import Server
from models.users import User
class Nickname(Base):
__tablename__ = 'nicknames'
id = Column(Integer, primary_key=True)
user_id = Column(Integer, ForeignKey('users.id'), nullable=False)
server_id = Column(Integer, ForeignKey('servers.id'), nullable=False)
user = relationship(User)
server = relationship(Server)
display_name = Column(String)
def __init__(self, user, server, display_name):
self.user = user
self.server = server
self.display_name = display_name
| 31
| 73
| 0.727273
| 487
| 0.714076
| 0
| 0
| 0
| 0
| 0
| 0
| 33
| 0.048387
|
dd3fd65b348e0bcff47a6b9cda24ca0e585cfa0c
| 6,749
|
py
|
Python
|
pirates/leveleditor/worldData/tortuga_building_int_10.py
|
itsyaboyrocket/pirates
|
6ca1e7d571c670b0d976f65e608235707b5737e3
|
[
"BSD-3-Clause"
] | 3
|
2021-02-25T06:38:13.000Z
|
2022-03-22T07:00:15.000Z
|
pirates/leveleditor/worldData/tortuga_building_int_10.py
|
itsyaboyrocket/pirates
|
6ca1e7d571c670b0d976f65e608235707b5737e3
|
[
"BSD-3-Clause"
] | null | null | null |
pirates/leveleditor/worldData/tortuga_building_int_10.py
|
itsyaboyrocket/pirates
|
6ca1e7d571c670b0d976f65e608235707b5737e3
|
[
"BSD-3-Clause"
] | 1
|
2021-02-25T06:38:17.000Z
|
2021-02-25T06:38:17.000Z
|
# uncompyle6 version 3.2.0
# Python bytecode 2.4 (62061)
# Decompiled from: Python 2.7.14 (v2.7.14:84471935ed, Sep 16 2017, 20:19:30) [MSC v.1500 32 bit (Intel)]
# Embedded file name: pirates.leveleditor.worldData.tortuga_building_int_10
from pandac.PandaModules import Point3, VBase3, Vec4, Vec3
objectStruct = {'AmbientColors': {}, 'DirectionalColors': {}, 'FogColors': {}, 'FogRanges': {}, 'Objects': {'1156279496.29dzlu0': {'Type': 'Building Interior', 'Name': '', 'AdditionalData': ['interior_shanty_store_tattoo'], 'Instanced': True, 'Objects': {'1169067906.19mike': {'Type': 'Townsperson', 'Category': 'Commoner', 'AnimSet': 'tatoo', 'CustomModel': 'None', 'GhostColor': 'None', 'GhostFX': 0, 'Greeting Animation': '', 'Hpr': VBase3(-102.509, 1.642, 0.0), 'Level': '37', 'Notice Animation 1': '', 'Notice Animation 2': '', 'Patrol Radius': '12.0000', 'Pos': Point3(6.941, -8.134, -0.178), 'PoseAnim': '', 'PoseFrame': '', 'Private Status': 'All', 'PropLeft': 'None', 'PropRight': 'None', 'Respawns': True, 'Scale': VBase3(1.0, 1.0, 1.0), 'ShopID': 'PORT_ROYAL_DEFAULTS', 'Start State': 'Idle', 'StartFrame': '0', 'Team': 'Villager', 'TrailFX': 'None', 'TrailLeft': 'None', 'TrailRight': 'None'}, '1179360000.0dchiappe': {'Type': 'Furniture', 'DisableCollision': False, 'Holiday': '', 'Hpr': VBase3(19.675, 0.0, 0.0), 'Pos': Point3(7.057, -8.171, 0.0), 'Scale': VBase3(1.023, 1.023, 1.023), 'VisSize': '', 'Visual': {'Model': 'models/props/stool_shanty'}}, '1179360000.0dchiappe0': {'Type': 'Furniture', 'DisableCollision': False, 'Holiday': '', 'Hpr': VBase3(-29.781, 0.0, 0.0), 'Objects': {}, 'Pos': Point3(7.771, -4.618, -0.007), 'Scale': VBase3(0.965, 0.965, 0.965), 'VisSize': '', 'Visual': {'Model': 'models/props/stool_shanty'}}, '1179360256.0dchiappe': {'Type': 'Townsperson', 'Category': 'Commoner', 'AnimSet': 'tatoo_receive', 'CustomModel': 'None', 'GhostColor': 'None', 'GhostFX': 0, 'Greeting Animation': '', 'HelpID': 'NONE', 'Holiday': '', 'Hpr': VBase3(-125.548, -4.22, -0.497), 'Level': '37', 'Notice Animation 1': '', 'Notice Animation 2': '', 'Patrol Radius': '12.0000', 'Pos': Point3(7.165, -4.221, 0.093), 'PoseAnim': '', 'PoseFrame': '', 'Private Status': 'All', 'PropLeft': 'None', 'PropRight': 'None', 'Respawns': True, 'Scale': VBase3(1.0, 1.0, 1.0), 'ShopID': 'PORT_ROYAL_DEFAULTS', 'Start State': 'Idle', 'StartFrame': '0', 'Team': 'Villager', 'TrailFX': 'None', 'TrailLeft': 'None', 'TrailRight': 'None', 'VisSize': ''}, '1201025152.0dxschafe': {'Type': 'Door Locator Node', 'Name': 'door_locator', 'Hpr': VBase3(-180.0, 0.0, 0.0), 'Pos': Point3(0.044, -4.421, 5.0), 'Scale': VBase3(1.0, 1.0, 1.0)}, '1201025280.0dxschafe': {'Type': 'Townsperson', 'Category': 'Tattoo', 'AnimSet': 'primp', 'CustomModel': 'None', 'GhostColor': 'None', 'GhostFX': 0, 'Greeting Animation': '', 'Hpr': VBase3(-163.551, 0.0, 0.0), 'Level': '37', 'Notice Animation 1': '', 'Notice Animation 2': '', 'Patrol Radius': '1.0000', 'Pos': Point3(16.835, 12.638, 0.0), 'PoseAnim': '', 'PoseFrame': '', 'Private Status': 'All', 'PropLeft': 'None', 'PropRight': 'None', 'Respawns': True, 'Scale': VBase3(1.0, 1.0, 1.0), 'ShopID': 'TORTUGA_DEFAULTS', 'Start State': 'Idle', 'StartFrame': '0', 'Team': 'Villager', 'TrailFX': 'None', 'TrailLeft': 'None', 'TrailRight': 'None'}, '1201122985.55dxschafe': {'Type': 'Light - Dynamic', 'Attenuation': '0.005', 'ConeAngle': '15.0000', 'DropOff': '45.5422', 'FlickRate': '0.5000', 'Flickering': False, 'Hpr': VBase3(170.832, -20.397, 4.921), 'Intensity': '1.0602', 'LightType': 'SPOT', 'Pos': Point3(19.411, 22.344, 13.063), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (1, 1, 1, 1), 'Model': 'models/props/light_tool_bulb'}}, '1201124068.83dxschafe': {'Type': 'Light - Dynamic', 'Attenuation': '0.005', 'ConeAngle': '60.0000', 'DropOff': '29.8193', 'FlickRate': '0.5000', 'Flickering': False, 'Hpr': 
VBase3(-119.397, -38.379, 0.0), 'Intensity': '0.9639', 'LightType': 'SPOT', 'Pos': Point3(1.086, 14.825, 18.821), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (1, 1, 1, 1), 'Model': 'models/props/light_tool_bulb'}}}, 'Visual': {'Model': 'models/buildings/interior_shanty_store'}}}, 'Node Links': [], 'Layers': {'Collisions': ['1184008208.59kmuller', '1184016064.62kmuller', '1184013852.84kmuller', '1185822696.06kmuller', '1184006140.32kmuller', '1184002350.98kmuller', '1184007573.29kmuller', '1184021176.59kmuller', '1184005963.59kmuller', '1188324241.31akelts', '1184006537.34kmuller', '1184006605.81kmuller', '1187139568.33kmuller', '1188324186.98akelts', '1184006730.66kmuller', '1184007538.51kmuller', '1184006188.41kmuller', '1184021084.27kmuller', '1185824396.94kmuller', '1185824250.16kmuller', '1185823630.52kmuller', '1185823760.23kmuller', '1185824497.83kmuller', '1185824751.45kmuller', '1187739103.34akelts', '1188323993.34akelts', '1184016538.29kmuller', '1185822200.97kmuller', '1184016225.99kmuller', '1195241421.34akelts', '1195242796.08akelts', '1184020642.13kmuller', '1195237994.63akelts', '1184020756.88kmuller', '1184020833.4kmuller', '1185820992.97kmuller', '1185821053.83kmuller', '1184015068.54kmuller', '1184014935.82kmuller', '1185821432.88kmuller', '1185821701.86kmuller', '1195240137.55akelts', '1195241539.38akelts', '1195238422.3akelts', '1195238473.22akelts', '1185821453.17kmuller', '1184021269.96kmuller', '1185821310.89kmuller', '1185821165.59kmuller', '1185821199.36kmuller', '1185822035.98kmuller', '1184015806.59kmuller', '1185822059.48kmuller', '1185920461.76kmuller', '1194984449.66akelts', '1185824206.22kmuller', '1184003446.23kmuller', '1184003254.85kmuller', '1184003218.74kmuller', '1184002700.44kmuller', '1186705073.11kmuller', '1187658531.86akelts', '1186705214.3kmuller', '1185824927.28kmuller', '1184014204.54kmuller', '1184014152.84kmuller']}, 'ObjectIds': {'1156279496.29dzlu0': '["Objects"]["1156279496.29dzlu0"]', '1169067906.19mike': '["Objects"]["1156279496.29dzlu0"]["Objects"]["1169067906.19mike"]', '1179360000.0dchiappe': '["Objects"]["1156279496.29dzlu0"]["Objects"]["1179360000.0dchiappe"]', '1179360000.0dchiappe0': '["Objects"]["1156279496.29dzlu0"]["Objects"]["1179360000.0dchiappe0"]', '1179360256.0dchiappe': '["Objects"]["1156279496.29dzlu0"]["Objects"]["1179360256.0dchiappe"]', '1201025152.0dxschafe': '["Objects"]["1156279496.29dzlu0"]["Objects"]["1201025152.0dxschafe"]', '1201025280.0dxschafe': '["Objects"]["1156279496.29dzlu0"]["Objects"]["1201025280.0dxschafe"]', '1201122985.55dxschafe': '["Objects"]["1156279496.29dzlu0"]["Objects"]["1201122985.55dxschafe"]', '1201124068.83dxschafe': '["Objects"]["1156279496.29dzlu0"]["Objects"]["1201124068.83dxschafe"]'}}
extraInfo = {'camPos': Point3(295.478, -101.859, 7.30613), 'camHpr': VBase3(66.8195, -6.16144, 0), 'focalLength': 1.39999997616, 'skyState': 2, 'fog': 0}
| 964.142857
| 6,298
| 0.676397
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 5,049
| 0.748111
|
dd409f1079701595dd303cbae441bb3663ea68de
| 755
|
py
|
Python
|
hgtools/managers/library.py
|
jaraco/hgtools
|
1090d139e5dbdab864da8f1917a9e674331b6f9b
|
[
"MIT"
] | 1
|
2017-05-17T15:12:29.000Z
|
2017-05-17T15:12:29.000Z
|
hgtools/managers/library.py
|
jaraco/hgtools
|
1090d139e5dbdab864da8f1917a9e674331b6f9b
|
[
"MIT"
] | 12
|
2016-01-01T14:43:44.000Z
|
2021-10-03T02:13:19.000Z
|
hgtools/managers/library.py
|
jaraco/hgtools
|
1090d139e5dbdab864da8f1917a9e674331b6f9b
|
[
"MIT"
] | null | null | null |
import sys
from . import base
from . import cmd
from . import reentry
class MercurialInProcManager(cmd.Mercurial, base.RepoManager):
"""
A RepoManager implemented by invoking the hg command in-process.
"""
def _invoke(self, *params):
"""
Run the self.exe command in-process with the supplied params.
"""
cmd = [self.exe, '-R', self.location] + list(params)
with reentry.in_process_context(cmd) as result:
sys.modules['mercurial.dispatch'].run()
stdout = result.stdio.stdout.getvalue()
stderr = result.stdio.stderr.getvalue()
if not result.returncode == 0:
raise RuntimeError(stderr.strip() or stdout.strip())
return stdout.decode('utf-8')
| 30.2
| 69
| 0.637086
| 681
| 0.901987
| 0
| 0
| 0
| 0
| 0
| 0
| 196
| 0.259603
|
dd40a6f098a3b0e41331e54d98f742cb251a3af3
| 1,882
|
py
|
Python
|
app/core/tests/test_models.py
|
royandri/attendance
|
cecb7a91b5e048569299ae6dfb796c20051a16a9
|
[
"MIT"
] | null | null | null |
app/core/tests/test_models.py
|
royandri/attendance
|
cecb7a91b5e048569299ae6dfb796c20051a16a9
|
[
"MIT"
] | null | null | null |
app/core/tests/test_models.py
|
royandri/attendance
|
cecb7a91b5e048569299ae6dfb796c20051a16a9
|
[
"MIT"
] | null | null | null |
from django.test import TestCase
from django.contrib.auth import get_user_model
from core import models
from django.utils import timezone
def sample_user(email='test@mail.com', password='testpass'):
# Create a sample user
return get_user_model().objects.create_user(email, password)
class ModelTests(TestCase):
    def test_create_user_with_email_successful(self):
        # Test creating a new user with an email is successful
email = 'royandri.dev@gmail.com'
password = 'admin'
user = get_user_model().objects.create_user(
email=email,
password=password
)
self.assertEqual(user.email, email)
self.assertTrue(user.check_password(password))
def test_new_user_email_normalized(self):
        # Test the email for a new user is normalized
email = 'royandri@GMAIL.COM'
user = get_user_model().objects.create_user(email, 'admin')
self.assertEqual(user.email, email.lower())
def test_new_user_invalid_email(self):
# Test creating user with no email raises error
with self.assertRaises(ValueError):
get_user_model().objects.create_user(None, 'admin')
def test_create_new_superuser(self):
# Test creating a new superuser
user = get_user_model().objects.create_superuser(
'royandri.dev@gmail.com',
'admin'
)
self.assertTrue(user.is_superuser)
self.assertTrue(user.is_staff)
def test_attendance_str(self):
# Test the attendance representation
attendance = models.Attendance.objects.create(
user=sample_user(),
time_in=timezone.localtime(timezone.now()).strftime('%H:%M:%S'),
date_in=timezone.localtime(timezone.now()).strftime('%Y-%m-%d')
)
self.assertEqual(str(attendance), attendance.date_in)
| 33.607143
| 76
| 0.667906
| 1,586
| 0.842721
| 0
| 0
| 0
| 0
| 0
| 0
| 379
| 0.201382
|
dd41e5e1e67e9d900eb2ff0bece445448ea41207
| 1,775
|
py
|
Python
|
controllers/__controller.py
|
VNCompany/vnforum
|
770aca3a94ad1ed54628d48867c299d83215f75a
|
[
"Unlicense"
] | null | null | null |
controllers/__controller.py
|
VNCompany/vnforum
|
770aca3a94ad1ed54628d48867c299d83215f75a
|
[
"Unlicense"
] | null | null | null |
controllers/__controller.py
|
VNCompany/vnforum
|
770aca3a94ad1ed54628d48867c299d83215f75a
|
[
"Unlicense"
] | null | null | null |
from flask import redirect, url_for, render_template
from flask_login import current_user
from components.pagination import html_pagination
from db_session import create_session
class Controller:
__view__ = None
__title__ = "Page"
view_includes = {}
jquery_enabled = True
db_session = None
def __init__(self):
self.view_includes.clear()
self.view_includes["css"] = ""
self.css("main.css")
self.view_includes["js"] = ""
self.javascript("jquery.js", "main.js")
self.db_session = create_session()
@staticmethod
def static(path: str):
return url_for('static', filename=path)
def view(self, **kwargs):
if self.__view__ is None:
raise AttributeError
elif current_user.is_authenticated and current_user.is_banned():
return redirect("/logout")
else:
return render_template(str(self.__view__).replace(".", "/") + ".html",
**kwargs,
**self.view_includes,
title=self.__title__)
def css(self, *names):
if "css" not in self.view_includes.keys():
self.view_includes["css"] = ""
for name in names:
self.view_includes["css"] += f'<link type="text/css" rel="stylesheet" href="' \
f'{self.static("css/" + name)}">\n'
def javascript(self, *names):
for name in names:
self.view_includes["js"] += f'<script type="text/javascript" src="{self.static("js/" + name)}"></script>\n'
def pagination(self, max_page, pos: int, link: str):
self.view_includes["pagination_string"] = html_pagination(max_page, pos, link)
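# A minimal subclass sketch (hypothetical, for illustration only): with
# __view__ set to "home.index", view() resolves the template "home/index.html",
# using the default CSS/JS includes configured in __init__ above.
#
#   class HomeController(Controller):
#       __view__ = "home.index"
#       __title__ = "Home"
#
#       def show(self):
#           return self.view()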
| 34.803922
| 119
| 0.580845
| 1,594
| 0.898028
| 0
| 0
| 88
| 0.049577
| 0
| 0
| 281
| 0.15831
|
dd43d5641662aeed07a7ef83b6967c23588c8ac3
| 371
|
py
|
Python
|
practice/algos/quick_sort/quick_sort.py
|
markfranciose/cheat_sheet_of_cheat_sheets
|
d126bbbb499af43098d3938e35df3381833cecac
|
[
"MIT"
] | null | null | null |
practice/algos/quick_sort/quick_sort.py
|
markfranciose/cheat_sheet_of_cheat_sheets
|
d126bbbb499af43098d3938e35df3381833cecac
|
[
"MIT"
] | 20
|
2020-07-21T01:52:53.000Z
|
2022-02-27T09:44:44.000Z
|
practice/algos/quick_sort/quick_sort.py
|
markfranciose/drops_of_knowledge
|
d126bbbb499af43098d3938e35df3381833cecac
|
[
"MIT"
] | null | null | null |
def quick_sort(arr):
if len(arr) < 2:
return arr
pivot = arr.pop()
left = []
right = []
for num in arr:
if num > pivot:
right.append(num)
else:
left.append(num)
return quick_sort(left) + [pivot] + quick_sort(right)
arr = [1,2,2,1,2,1,5,23,1,91,2,4,1,]
print(quick_sort(arr))
| 18.55
| 58
| 0.490566
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
dd442063bf90797178fddb15d68e9d7b4d342147
| 157
|
py
|
Python
|
Python/texttospeech.py
|
Rohit01-pro/All_Program_helper
|
86b75ecc4ecb095f11e46e6f80c660e27dd22f27
|
[
"MIT"
] | 16
|
2021-10-03T11:15:49.000Z
|
2021-10-31T04:40:24.000Z
|
Python/texttospeech.py
|
Rohit01-pro/All_Program_helper
|
86b75ecc4ecb095f11e46e6f80c660e27dd22f27
|
[
"MIT"
] | 232
|
2021-10-02T14:51:43.000Z
|
2021-11-14T08:23:27.000Z
|
Python/texttospeech.py
|
Rohit01-pro/All_Program_helper
|
86b75ecc4ecb095f11e46e6f80c660e27dd22f27
|
[
"MIT"
] | 166
|
2021-10-02T13:56:34.000Z
|
2021-10-31T17:56:34.000Z
|
from gtts import gTTS
import os
f=open("1.txt")
x=f.read()
language='en'
audio=gTTS(text=x,lang=language,slow=False)
audio.save("1.wav")
os.system("1.wav")
| 15.7
| 43
| 0.707006
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 25
| 0.159236
|
dd464fa4b37cf124b28327d6a98fc0f0c806f33d
| 101
|
py
|
Python
|
code/pyFoamSymlinkToFile.py
|
sosohungry/pyfoam
|
b19e40a0ef1f41268930122226660414722178e6
|
[
"MIT"
] | null | null | null |
code/pyFoamSymlinkToFile.py
|
sosohungry/pyfoam
|
b19e40a0ef1f41268930122226660414722178e6
|
[
"MIT"
] | null | null | null |
code/pyFoamSymlinkToFile.py
|
sosohungry/pyfoam
|
b19e40a0ef1f41268930122226660414722178e6
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python
from PyFoam.Applications.SymlinkToFile import SymlinkToFile
SymlinkToFile()
| 16.833333
| 59
| 0.811881
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 22
| 0.217822
|
dd468535a193a7786f5ac49b546150a18ebcd261
| 1,172
|
py
|
Python
|
setup.py
|
themightyoarfish/svcca
|
23faa374489067c1c76cee44d92663c120603bdc
|
[
"Apache-2.0"
] | 8
|
2019-01-17T14:20:07.000Z
|
2021-07-08T12:16:23.000Z
|
setup.py
|
themightyoarfish/svcca
|
23faa374489067c1c76cee44d92663c120603bdc
|
[
"Apache-2.0"
] | 1
|
2019-01-30T11:44:25.000Z
|
2019-02-07T15:02:02.000Z
|
setup.py
|
themightyoarfish/svcca-gpu
|
23faa374489067c1c76cee44d92663c120603bdc
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
from distutils.core import setup
import setuptools
import os
root_dir = os.path.abspath(os.path.dirname(__file__))
with open(f'{root_dir}/README.md') as f:
readme = f.read()
with open(f'{root_dir}/requirements.txt') as f:
requirements = f.read().split()
packages = setuptools.find_packages('.', include='svcca.*')
setup(name='svcca',
version='0.0.1',
description='SVCCA on Numpy, Cupy, and PyTorch',
long_description=readme,
author='Rasmus Diederichsen',
author_email='rasmus@peltarion.com',
url='https://github.com/themightyoarfish/svcca-gpu',
classifiers=['Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Visualization',
'Programming Language :: Python :: 3.6',
'License :: OSI Approved :: Apache License',
'Intended Audience :: Developers',
],
keywords='deep-learning pytorch cupy numpy svcca neural-networks machine-learning'.split(),
install_requires=requirements,
packages=packages,
zip_safe=False, # don't install egg, but source
)
| 33.485714
| 97
| 0.636519
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 533
| 0.454778
|
dd470b99c72201568c8abe1bba1e9f7add60848f
| 1,679
|
py
|
Python
|
classmark/ui/home_section.py
|
mdocekal/ClassMark
|
e6019f9abeb99e9a6b72365a508d5a6dac13c3c7
|
[
"Unlicense"
] | null | null | null |
classmark/ui/home_section.py
|
mdocekal/ClassMark
|
e6019f9abeb99e9a6b72365a508d5a6dac13c3c7
|
[
"Unlicense"
] | 2
|
2021-01-18T12:29:18.000Z
|
2021-01-18T14:33:31.000Z
|
classmark/ui/home_section.py
|
windionleaf/ClassMark
|
e6019f9abeb99e9a6b72365a508d5a6dac13c3c7
|
[
"Unlicense"
] | null | null | null |
"""
Created on 19. 12. 2018
Module for home section of the application.
:author: Martin Dočekal
:contact: xdocek09@stud.fit.vubtr.cz
"""
from .widget_manager import WidgetManager
from .section_router import SectionRouter
from functools import partial
from .models import ListLastExperiments
from PySide2.QtCore import Qt
class HomeSection(WidgetManager):
"""
HomeSection manager class.
"""
TEMPLATE="home"
"""Corresponding template name."""
def __init__(self, sectionRouter:SectionRouter, parent=None):
"""
Initializes home section.
:param sectionRouter: Router for in app navigation.
:type sectionRouter: SectionRouter
:param parent: Parent widget
:type parent: QWidget
"""
super().__init__()
self._widget=self._loadTemplate(self.TEMPLATE, parent)
self._router=sectionRouter
#register click events
self._widget.toolNewExperiment.clicked.connect(partial(self._router.goExperiment, None))
self._widget.toolLoadExperiment.clicked.connect(self._router.goLoadExperiment)
#self._widget.findChild(QToolButton, 'toolNewExperiment')
        # last experiments
self._widget.lastExpList.setModel(ListLastExperiments())
self._widget.lastExpList.doubleClicked.connect(self.lastExperSel)
def lastExperSel(self, index):
"""
Last experiment was selected.
:param index: Index.
:type index: QModelIndex
"""
self._router.goExperiment(self._widget.lastExpList.model().data(index, Qt.DisplayRole))
| 29.45614
| 96
| 0.661108
| 1,339
| 0.797024
| 0
| 0
| 0
| 0
| 0
| 0
| 676
| 0.402381
|
dd4835795e462053f9d98a0abafa853d67dd9bfc
| 829
|
py
|
Python
|
urls.py
|
CodeForPhilly/philly_legislative
|
5774100325b5374a0510674b4a542171fff3fcd3
|
[
"BSD-Source-Code"
] | 2
|
2017-08-29T22:27:05.000Z
|
2019-04-27T20:21:31.000Z
|
urls.py
|
CodeForPhilly/philly_legislative
|
5774100325b5374a0510674b4a542171fff3fcd3
|
[
"BSD-Source-Code"
] | null | null | null |
urls.py
|
CodeForPhilly/philly_legislative
|
5774100325b5374a0510674b4a542171fff3fcd3
|
[
"BSD-Source-Code"
] | null | null | null |
from django.conf.urls.defaults import *
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Example:
#(r'^philly_legislative/', include('philly_legislative.foo.urls')),
# Uncomment the admin/doc line below and add 'django.contrib.admindocs'
# to INSTALLED_APPS to enable admin documentation:
# (r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
(r'^admin/(.*)', admin.site.root),
(r'^subs/$', 'phillyleg.views.index'),
(r'^subs/create/$', 'phillyleg.views.create'),
(r'^subs/unsubscribe/$', 'phillyleg.views.unsubscribe'),
#(r'^subs/(?P<subscription_id>\d+)/$', 'phillyleg.views.edit'),
(r'^subs/delete/$', 'phillyleg.views.delete')
)
| 33.16
| 76
| 0.671894
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 602
| 0.726176
|
dd486d1d0f1328a725ad7af4079cf4b9fc30ab88
| 2,510
|
py
|
Python
|
irf/scripts/read_corsika_headers.py
|
fact-project/irf
|
d82a3d4ae8b9ef15d9f473cdcd01a5f9c92d42a2
|
[
"MIT"
] | null | null | null |
irf/scripts/read_corsika_headers.py
|
fact-project/irf
|
d82a3d4ae8b9ef15d9f473cdcd01a5f9c92d42a2
|
[
"MIT"
] | 8
|
2017-04-25T11:19:32.000Z
|
2019-05-28T07:24:32.000Z
|
irf/scripts/read_corsika_headers.py
|
fact-project/irf
|
d82a3d4ae8b9ef15d9f473cdcd01a5f9c92d42a2
|
[
"MIT"
] | null | null | null |
from corsikaio import CorsikaFile
from fact.io import to_h5py
from multiprocessing import Pool, cpu_count
from tqdm import tqdm
import os
import click
import pandas as pd
import numpy as np
from glob import glob
def get_headers(f):
with CorsikaFile(f) as cf:
run_header, event_headers, run_end = cf.read_headers()
return run_header, event_headers, run_end
event_columns = [
'run_number',
'event_number',
'particle_id',
'total_energy',
'starting_altitude',
'first_target_id',
'first_interaction_height',
'momentum_x',
'momentum_y',
'momentum_minus_z',
'zenith',
'azimuth',
]
run_header_columns = [
'run_number',
'date',
'energy_spectrum_slope',
'energy_min',
'energy_max',
]
@click.command()
@click.argument('outputfile')
@click.argument(
'inputdir',
nargs=-1,
type=click.Path(exists=True, file_okay=False, dir_okay=True),
)
def main(outputfile, inputdir):
inputfiles = []
for d in inputdir:
inputfiles.extend(glob(os.path.join(d, 'cer*')))
for f in inputfiles[:]:
if f + '.gz' in inputfiles:
inputfiles.remove(f + '.gz')
print('Processing', len(inputfiles), 'files')
with Pool(cpu_count()) as pool:
results = pool.imap_unordered(get_headers, inputfiles)
run_headers = []
run_ends = []
for run_header, event_headers, run_end in tqdm(results, total=len(inputfiles)):
run_headers.append(run_header)
run_ends.append(run_end)
df = pd.DataFrame(event_headers[event_columns])
to_h5py(df, outputfile, key='corsika_events', mode='a')
print('saving runwise information')
runs = pd.DataFrame(np.array(run_headers)[run_header_columns])
# some runs might have failed and thus no run end block
for run_end in run_ends:
if run_end is not None:
dtype = run_end.dtype
break
else:
raise IOError('All run_end blocks are None, all runs failed.')
dummy = np.array([(b'RUNE', np.nan, np.nan)], dtype=dtype)[0]
run_ends = [r if r is not None else dummy for r in run_ends]
run_ends = np.array(run_ends)
print('Number of failed runs:', np.count_nonzero(np.isnan(run_ends['n_events'])))
runs['n_events'] = run_ends['n_events']
to_h5py(runs, outputfile, key='corsika_runs', mode='a')
print('done')
if __name__ == '__main__':
main()
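# Hypothetical invocation sketch: the script takes an output HDF5 file followed
# by one or more directories containing CORSIKA 'cer*' files, e.g.
#
#   python read_corsika_headers.py corsika_headers.hdf5 /path/to/run1 /path/to/run2
#
# (the file and directory names above are placeholders, not from the repository)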
| 25.1
| 89
| 0.632669
| 0
| 0
| 0
| 0
| 1,704
| 0.678884
| 0
| 0
| 539
| 0.214741
|
dd489e1ae7bc915c79a5ee9217ee84f8b92fa142
| 3,101
|
py
|
Python
|
tests/test_maker.py
|
chfsx/pibooth
|
4049fb748853ff90e53212336ff7d71bba6c1328
|
[
"MIT"
] | null | null | null |
tests/test_maker.py
|
chfsx/pibooth
|
4049fb748853ff90e53212336ff7d71bba6c1328
|
[
"MIT"
] | null | null | null |
tests/test_maker.py
|
chfsx/pibooth
|
4049fb748853ff90e53212336ff7d71bba6c1328
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import pytest
from pibooth.pictures.factory import PilPictureFactory, OpenCvPictureFactory
footer_texts = ('This is the main title', 'Footer text 2', 'Footer text 3')
footer_fonts = ('Amatic-Bold', 'DancingScript-Regular', 'Roboto-LightItalic')
footer_colors = ((10, 0, 0), (0, 50, 0), (0, 50, 50))
def setup_factory(m, fond, overlay=''):
m.add_text(footer_texts[0], footer_fonts[0], footer_colors[0], 'left')
m.add_text(footer_texts[1], footer_fonts[1], footer_colors[1], 'center')
m.add_text(footer_texts[2], footer_fonts[2], footer_colors[2], 'right')
m.set_background(fond)
if overlay:
m.set_overlay(overlay)
def test_benchmark_pil_portrait(benchmark, captures_portrait, fond):
factory = PilPictureFactory(2400, 3600, *captures_portrait)
setup_factory(factory, fond)
benchmark(factory.build)
def test_benchmark_pil_landscape(benchmark, captures_landscape, fond):
factory = PilPictureFactory(3600, 2400, *captures_landscape)
setup_factory(factory, fond)
benchmark(factory.build)
def test_benchmark_cv2_portrait(benchmark, captures_portrait, fond):
factory = OpenCvPictureFactory(2400, 3600, *captures_portrait)
setup_factory(factory, fond)
benchmark(factory.build)
def test_benchmark_cv2_landscape(benchmark, captures_landscape, fond):
factory = OpenCvPictureFactory(3600, 2400, *captures_landscape)
setup_factory(factory, fond)
benchmark(factory.build)
@pytest.mark.parametrize('captures_nbr', [1, 2, 3, 4])
def test_save_cv2_portrait(captures_nbr, captures_portrait, fond):
factory = OpenCvPictureFactory(2400, 3600, *captures_portrait[:captures_nbr])
setup_factory(factory, fond)
factory.save("OpenCV-portrait-{}.jpg".format(captures_nbr))
@pytest.mark.parametrize('captures_nbr', [1, 2, 3, 4])
def test_save_cv2_landscape(captures_nbr, captures_landscape, fond):
factory = OpenCvPictureFactory(3600, 2400, *captures_landscape[:captures_nbr])
setup_factory(factory, fond)
factory.save("OpenCV-landscape-{}.jpg".format(captures_nbr))
@pytest.mark.parametrize('captures_nbr', [1, 2, 3, 4])
def test_save_pil_portrait(captures_nbr, captures_portrait, fond):
factory = PilPictureFactory(2400, 3600, *captures_portrait[:captures_nbr])
setup_factory(factory, fond)
factory.save("PIL-portrait-{}.jpg".format(captures_nbr))
@pytest.mark.parametrize('captures_nbr', [1, 2, 3, 4])
def test_save_pil_landscape(captures_nbr, captures_landscape, fond):
factory = PilPictureFactory(3600, 2400, *captures_landscape[:captures_nbr])
setup_factory(factory, fond)
factory.save("PIL-landscape-{}.jpg".format(captures_nbr))
def test_save_pil_overlay(captures_landscape, fond, overlay):
factory = PilPictureFactory(3600, 2400, *captures_landscape)
setup_factory(factory, fond, overlay)
factory.save("PIL-overlay-4.jpg")
def test_save_cv2_overlay(captures_landscape, fond, overlay):
factory = OpenCvPictureFactory(3600, 2400, *captures_landscape)
setup_factory(factory, fond, overlay)
factory.save("OpenCV-overlay-4.jpg")
| 37.817073
| 82
| 0.751048
| 0
| 0
| 0
| 0
| 1,196
| 0.385682
| 0
| 0
| 345
| 0.111254
|
dd491d9bbf97708bde610843ff7316857a2a3334
| 6,452
|
py
|
Python
|
assignment 1/question3/q3.py
|
Eunoia1729/soft-computing
|
d7fc155378d1bb0b914a6f660095653e32d2c0b8
|
[
"Apache-2.0"
] | 1
|
2021-11-14T15:02:35.000Z
|
2021-11-14T15:02:35.000Z
|
assignment 1/question3/q3.py
|
Eunoia1729/soft-computing
|
d7fc155378d1bb0b914a6f660095653e32d2c0b8
|
[
"Apache-2.0"
] | null | null | null |
assignment 1/question3/q3.py
|
Eunoia1729/soft-computing
|
d7fc155378d1bb0b914a6f660095653e32d2c0b8
|
[
"Apache-2.0"
] | null | null | null |
"""## Question 3: Scrap Hotel Data
The below code is for India and can be extended to other countries by adding an outer loop given in the last part. The below codes takes several minutes to run.
"""
import requests
import pandas as pd
from bs4 import BeautifulSoup
hotelname_list = []
city_list = []
countries_list = []
rating_list = []
prince_list = []
Amenities_list = []
HotelDescription_list = []
Review1_list = []
Review2_list = []
hotel_name = ""
city_name = ""
country_name = ""
ratingl = ""
pricel = ""
amenities = ""
descriptionl = ""
review1l = ""
review2l = ""
url = 'https://www.goibibo.com/destinations/all-states-in-india/'
data = requests.get(url)
html = data.text
soup = BeautifulSoup(html, 'html.parser')
cards = soup.find_all('div', {'class' : 'col-md-4 col-sm-4 col-xs-12 filtr-item posrel'})
state_urls = []
state_names = []
for card in cards :
for a in card.find_all('a', href=True):
if a.text.rstrip():
state_urls.append(a['href'])
state_names.append(a.text.rstrip())
length = len(state_urls)
for i in range(length):
url = state_urls[i]
country_name = 'India'
data = requests.get(url)
html = data.text
soup = BeautifulSoup(html, 'html.parser')
places_to_visit = soup.find('div', {'class' : 'place-to-visit-container'})
if(places_to_visit):
card = places_to_visit.find('div', {'class' : 'col-md-12'})
city_urls = {}
for a in card.find_all('a', href=True):
if a['href']:
list = a['href'].split('/')
city_urls[list[4]] = 'https://www.goibibo.com/hotels/hotels-in-' + list[4] + '-ct/'
for city in city_urls:
print(f'Extracting for city : {city}')
city_name = city
url = city_urls[city]
response = requests.get(url)
data = BeautifulSoup(response.text, 'html.parser')
cards_price_data = data.find_all('p', attrs={'class', 'HotelCardstyles__CurrentPrice-sc-1s80tyk-27 czKsrL'})
cards_url_data = data.find_all('div', attrs={'class', 'HotelCardstyles__HotelNameWrapperDiv-sc-1s80tyk-11 hiiHjq'})
hotel_price = {}
hotel_url = {}
for i in range(0, len(cards_price_data)):
hotel_price[cards_url_data[i].text.rstrip()] = cards_price_data[i].text.rstrip()
hotel_url[cards_url_data[i].text.rstrip()] = 'https://www.goibibo.com' + cards_url_data[i].find('a', href = True)['href']
for i in range(0, len(cards_price_data)):
url = hotel_url[cards_url_data[i].text.rstrip()]
data = requests.get(url)
html = data.text
hotel_name = cards_url_data[i].text.rstrip()
pricel = hotel_price[cards_url_data[i].text.rstrip()]
# print('Extracting for hotel : ' + cards_url_data[i].text.rstrip())
soup = BeautifulSoup(html, 'html.parser')
div = soup.find('div', { 'id': 'root' })
description = div.find('section', {'class' : 'HotelDetailsMain__HotelDetailsContainer-sc-2p7gdu-0 kpmitu'})
descriptiont = description.find('span', {'itemprop' : 'streetAddress'})
if descriptiont:
address = descriptiont.text.rstrip().replace(' View on Map', '')
descriptionl = address
rating = 'Rating not found'
ratingdata = description.find('span', {'itemprop' : 'ratingValue'}) #contains rating
if ratingdata:
rating = ratingdata.text.rstrip()
ratingl = rating
review1 = 'Review not found'
review2 = 'Review not found'
reviews = div.find_all('span', {'class' : 'UserReviewstyles__UserReviewTextStyle-sc-1y05l7z-4 dTkBBw'})
if(len(reviews) > 1):
review1 = reviews[0].text.rstrip()
if(len(reviews) > 3):
review2 = reviews[3].text.rstrip()
review1l = review1
review2l = review2
amenities_list = [] #contains all the amenities.
amenitiesdiv = div.find('div', {'class' : 'Amenitiesstyles__AmenitiesListBlock-sc-10opy4a-4 cMbIgg'})
if amenitiesdiv:
for amenity in amenitiesdiv.find_all('span', {'class':'Amenitiesstyles__AmenityItemText-sc-10opy4a-8 iwRmcg'}) :
if amenity:
amenities_list.append(amenity.text.rstrip())
else:
amenities_list.append('Amenity Not Found')
amenities = amenities_list
hotelname_list.append(hotel_name)
city_list.append(city_name)
countries_list.append(country_name)
rating_list.append(ratingl)
prince_list.append(pricel)
Amenities_list.append(amenities)
HotelDescription_list.append(descriptionl)
Review1_list.append(review1l)
Review2_list.append(review2l)
print(f'Extracted {len(cards_price_data)} hotels at {city} successfully')
dict = {'Hotel_Name': hotelname_list, 'City_Name': city_list, 'country_name': countries_list,
'Rating' : rating_list, 'Price/Night' : prince_list, 'Amenities' : Amenities_list,
'Description' : HotelDescription_list, 'Review1' : Review1_list, 'Review2' : Review2_list}
df = pd.DataFrame(dict)
df.to_csv('hotels.csv')
"""To extract for all the countries, we need to use the below code in the outer loop"""
hotelname_list = []
city_list = []
countries_list = []
rating_list = []
prince_list = []
Amenities_list = []
HotelDescription_list = []
Review1_list = []
Review2_list = []
hotel_name = ""
city_name = ""
country_name = ""
ratingl = ""
pricel = ""
amenities = ""
descriptionl = ""
review1l = ""
review2l = ""
url = 'https://www.goibibo.com/destinations/intl/all-countries/'
data = requests.get(url)
html = data.text
soup = BeautifulSoup(html, 'html.parser')
cards = soup.find_all('div', {'class' : 'col-md-4 col-sm-4 col-xs-12 filtr-item posrel'})
country_urls = []
country_names = []
for card in cards :
for a in card.find_all('a', href=True):
if a['href']:
country_urls.append(a['href'])
country_names.append(a.text.rstrip())
length = len(country_urls)
for i in range(length):
url = country_urls[i]
country_name = country_names[i]
data = requests.get(url)
html = data.text
soup = BeautifulSoup(html, 'html.parser')
places_to_visit = soup.find('div', {'class' : 'place-to-visit-container'})
if(places_to_visit):
card = places_to_visit.find('div', {'class' : 'col-md-12'})
city_urls = {}
for a in card.find_all('a', href=True):
if a['href']:
list = a['href'].split('/')
city_urls[list[3]] = 'https://www.goibibo.com/hotels/intl-hotels-in-' + list[3] + '-ct/'
print(country_name)
| 36.451977
| 160
| 0.653131
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,864
| 0.288903
|
dd4965798452f29205244dc8f8464e898af885fa
| 234
|
py
|
Python
|
groundstation/ROV/OCR/SScrop.py
|
iturov/rov2018
|
ca1949806d105a2caddf2cf7a1361e2d3f6a1246
|
[
"MIT"
] | 3
|
2018-01-26T14:00:50.000Z
|
2018-08-08T06:44:21.000Z
|
groundstation/ROV/OCR/SScrop.py
|
iturov/rov2018
|
ca1949806d105a2caddf2cf7a1361e2d3f6a1246
|
[
"MIT"
] | null | null | null |
groundstation/ROV/OCR/SScrop.py
|
iturov/rov2018
|
ca1949806d105a2caddf2cf7a1361e2d3f6a1246
|
[
"MIT"
] | 2
|
2018-08-08T06:44:23.000Z
|
2020-10-24T11:36:33.000Z
|
import pyscreenshot as ImageGrab
i=0
src_path ="C:\\Users\\Public\\ROV\OCR\\"
if __name__ == "__main__":
# part of the screen
im=ImageGrab.grab(bbox=(200,100,1100,600)) # X1,Y1,X2,Y2
im.save(src_path + 'init.png')
| 14.625
| 60
| 0.645299
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 83
| 0.354701
|
dd4ba76a5fa9e5f97446998ac4f6a5e6ee41ec63
| 3,008
|
py
|
Python
|
tests/http_client/conftest.py
|
sjaensch/aiobravado
|
d3f1eb71883b1f24c4b592917890160eb3d3cbcc
|
[
"BSD-3-Clause"
] | 19
|
2017-11-20T22:47:12.000Z
|
2021-12-23T15:56:41.000Z
|
tests/http_client/conftest.py
|
sjaensch/aiobravado
|
d3f1eb71883b1f24c4b592917890160eb3d3cbcc
|
[
"BSD-3-Clause"
] | 10
|
2018-01-11T12:53:01.000Z
|
2020-01-27T20:05:51.000Z
|
tests/http_client/conftest.py
|
sjaensch/aiobravado
|
d3f1eb71883b1f24c4b592917890160eb3d3cbcc
|
[
"BSD-3-Clause"
] | 4
|
2017-11-18T12:37:14.000Z
|
2021-03-19T14:48:13.000Z
|
# -*- coding: utf-8 -*-
import threading
import time
import bottle
import ephemeral_port_reserve
import pytest
import umsgpack
from bravado_core.content_type import APP_JSON
from bravado_core.content_type import APP_MSGPACK
from six.moves import urllib
ROUTE_1_RESPONSE = b'HEY BUDDY'
ROUTE_2_RESPONSE = b'BYE BUDDY'
API_RESPONSE = {'answer': 42}
SWAGGER_SPEC_DICT = {
'swagger': '2.0',
'info': {'version': '1.0.0', 'title': 'Integration tests'},
'definitions': {
'api_response': {
'properties': {
'answer': {
'type': 'integer'
},
},
'required': ['answer'],
'type': 'object',
'x-model': 'api_response',
'title': 'api_response',
}
},
'basePath': '/',
'paths': {
'/json': {
'get': {
'operationId': 'get_json',
'tags': ['json'],
'produces': ['application/json'],
'responses': {
'200': {
'description': 'HTTP/200',
'schema': {'$ref': '#/definitions/api_response'},
},
},
},
},
'/msgpack': {
'get': {
'produces': ['application/msgpack'],
'responses': {
'200': {
'description': 'HTTP/200',
'schema': {'$ref': '#/definitions/api_response'},
}
}
}
}
}
}
@bottle.get('/swagger.json')
def swagger_spec():
return SWAGGER_SPEC_DICT
@bottle.get('/json')
def api_json():
bottle.response.content_type = APP_JSON
return API_RESPONSE
@bottle.route('/msgpack')
def api_msgpack():
bottle.response.content_type = APP_MSGPACK
return umsgpack.packb(API_RESPONSE)
@bottle.route('/1')
def one():
return ROUTE_1_RESPONSE
@bottle.route('/2')
def two():
return ROUTE_2_RESPONSE
@bottle.post('/double')
def double():
x = bottle.request.params['number']
return str(int(x) * 2)
@bottle.get('/sleep')
def sleep_api():
sec_to_sleep = float(bottle.request.GET.get('sec', '1'))
time.sleep(sec_to_sleep)
return sec_to_sleep
def wait_unit_service_starts(url, timeout=10):
start = time.time()
while time.time() < start + timeout:
try:
urllib.request.urlopen(url, timeout=2)
except urllib.error.HTTPError:
return
except urllib.error.URLError:
time.sleep(0.1)
@pytest.yield_fixture(scope='session')
def threaded_http_server():
port = ephemeral_port_reserve.reserve()
thread = threading.Thread(
target=bottle.run, kwargs={'host': 'localhost', 'port': port},
)
thread.daemon = True
thread.start()
server_address = 'http://localhost:{port}'.format(port=port)
wait_unit_service_starts(server_address)
yield server_address
| 24.064
| 73
| 0.539894
| 0
| 0
| 358
| 0.119016
| 1,081
| 0.359375
| 0
| 0
| 688
| 0.228723
|
dd4bd1dde3eae994bf4970c151cbd96f077c070c
| 1,479
|
py
|
Python
|
test/test_convvae.py
|
kejiejiang/UnsupervisedDeepLearning-Pytorch
|
6ea7b7151ae62bf0130b56cc023f2be068aa87f0
|
[
"MIT"
] | 87
|
2017-11-22T02:59:24.000Z
|
2022-01-16T13:08:40.000Z
|
test/test_convvae.py
|
CauchyLagrange/UnsupervisedDeepLearning-Pytorch
|
6ea7b7151ae62bf0130b56cc023f2be068aa87f0
|
[
"MIT"
] | 3
|
2018-04-24T11:46:51.000Z
|
2020-01-07T00:01:46.000Z
|
test/test_convvae.py
|
CauchyLagrange/UnsupervisedDeepLearning-Pytorch
|
6ea7b7151ae62bf0130b56cc023f2be068aa87f0
|
[
"MIT"
] | 25
|
2018-03-15T04:02:21.000Z
|
2021-12-30T09:24:19.000Z
|
import torch
import torch.utils.data
from torchvision import datasets, transforms
import numpy as np
from udlp.autoencoder.convVAE import ConvVAE
import argparse
parser = argparse.ArgumentParser(description='VAE MNIST Example')
parser.add_argument('--lr', type=float, default=0.0001, metavar='N',
help='learning rate for training (default: 0.001)')
parser.add_argument('--batch-size', type=int, default=128, metavar='N',
help='input batch size for training (default: 128)')
parser.add_argument('--epochs', type=int, default=10, metavar='N',
help='number of epochs to train (default: 10)')
parser.add_argument('--save', type=str, default="", metavar='N',
help='number of epochs to train (default: 10)')
args = parser.parse_args()
train_loader = torch.utils.data.DataLoader(
datasets.SVHN('./dataset/svhn', split='train', download=True,
transform=transforms.ToTensor()),
batch_size=args.batch_size, shuffle=True, num_workers=2)
test_loader = torch.utils.data.DataLoader(
datasets.SVHN('./dataset/svhn', split='test', download=True, transform=transforms.ToTensor()),
batch_size=args.batch_size, shuffle=True, num_workers=2)
vae = ConvVAE(width=32, height=32, nChannels=3, hidden_size=500, z_dim=100, binary=True,
nFilters=64)
vae.fit(train_loader, test_loader, lr=args.lr, num_epochs=args.epochs)
if args.save!="":
torch.save(vae.state_dict(), args.save)
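# Hypothetical invocation sketch (values and file name are placeholders): train
# for 20 epochs with a smaller learning rate and save the weights afterwards,
#
#   python test_convvae.py --lr 0.00005 --batch-size 128 --epochs 20 --save convvae_svhn.pt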
| 46.21875
| 98
| 0.697769
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 291
| 0.196755
|
dd4db1c506d212f1e3f132f918bc8f782cebb18b
| 673
|
py
|
Python
|
SourceCode/Bayes/bayeslearn.py
|
xuanyuansen/PyMachineLearning
|
c5b88c4d9aa2f5d59160d0824f7cee8377e7e16e
|
[
"Apache-2.0"
] | 1
|
2017-01-17T06:19:33.000Z
|
2017-01-17T06:19:33.000Z
|
SourceCode/Bayes/bayeslearn.py
|
xuanyuansen/PyMachineLearning
|
c5b88c4d9aa2f5d59160d0824f7cee8377e7e16e
|
[
"Apache-2.0"
] | null | null | null |
SourceCode/Bayes/bayeslearn.py
|
xuanyuansen/PyMachineLearning
|
c5b88c4d9aa2f5d59160d0824f7cee8377e7e16e
|
[
"Apache-2.0"
] | null | null | null |
#coding=utf-8
'''
Created on September 20, 2013
@author: Wangliaofan
'''
import bayes
import feedparser
from time import *
if __name__== '__main__':
listOPosts,listClasses = bayes.loadDataSet()
print listOPosts,listClasses
myVocabList = bayes.createVocabList(listOPosts)
print myVocabList
trainMat=[]
for postinDoc in listOPosts:
trainMat.append(bayes.setOfWords2Vec(myVocabList, postinDoc))
print trainMat
p0V,p1V,pAb=bayes.trainNB0(trainMat, listClasses)
print p0V
print p1V
print pAb
#ny=feedparser.parse('http://newyork.craigslist.org/stp/index.rss')
#sleep(5)
#print ny['entries']
bayes.spamTest()
pass
| 24.035714
| 71
| 0.708767
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 176
| 0.259205
|
dd57860debea07d7b1dee00c8d3f246398e5a1ff
| 573
|
py
|
Python
|
modules/yats/middleware/header.py
|
PrathameshBolade/yats
|
93bb5271255120b7131a3bc416e3386428a4d3ec
|
[
"MIT"
] | 54
|
2015-01-26T07:56:59.000Z
|
2022-03-10T18:48:05.000Z
|
modules/yats/middleware/header.py
|
PrathameshBolade/yats
|
93bb5271255120b7131a3bc416e3386428a4d3ec
|
[
"MIT"
] | 8
|
2015-03-15T18:33:39.000Z
|
2021-12-21T14:23:11.000Z
|
modules/yats/middleware/header.py
|
PrathameshBolade/yats
|
93bb5271255120b7131a3bc416e3386428a4d3ec
|
[
"MIT"
] | 23
|
2015-02-19T16:55:35.000Z
|
2022-03-11T19:49:06.000Z
|
# -*- coding: utf-8 -*-
from socket import gethostname
def ResponseInjectHeader(get_response):
def middleware(request):
setattr(request, '_dont_enforce_csrf_checks', True)
response = get_response(request)
# response['Access-Control-Allow-Origin'] = '*'
# response['Access-Control-Allow-Methods'] = 'GET, POST'
response['X-ProcessedBy'] = gethostname()
response['Cache-Control'] = 'no-cache, must-revalidate'
response['Expires'] = 'Sat, 26 Jul 1997 05:00:00 GMT'
return response
return middleware
| 30.157895
| 64
| 0.649215
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 250
| 0.4363
|
dd58051ac5d7683774d3d6e01bb0dea25252af19
| 1,334
|
py
|
Python
|
handshake_client/sockets.py
|
naoki-maeda/handshake-client-py
|
286884b358e15f84965f3c3224cfabd83e1a1406
|
[
"MIT"
] | 3
|
2020-12-31T08:29:20.000Z
|
2021-08-14T14:41:22.000Z
|
handshake_client/sockets.py
|
naoki-maeda/handshake-client-py
|
286884b358e15f84965f3c3224cfabd83e1a1406
|
[
"MIT"
] | null | null | null |
handshake_client/sockets.py
|
naoki-maeda/handshake-client-py
|
286884b358e15f84965f3c3224cfabd83e1a1406
|
[
"MIT"
] | 1
|
2020-05-25T14:26:33.000Z
|
2020-05-25T14:26:33.000Z
|
import logging
import socketio
logger = logging.getLogger("handshake.socket")
sio = socketio.AsyncClient(logger=logger)
async def get_connection(
url: str, api_key: str, watch_chain: bool = True, watch_mempool: bool = True,
) -> socketio.AsyncClient:
"""
see https://hsd-dev.org/guides/events.html
"""
assert type(url) == str
assert type(api_key) == str
assert type(watch_chain) == bool
assert type(watch_mempool) == bool
if sio.connected is False:
await sio.connect(url, transports=["websocket"])
await sio.call("auth", api_key)
if watch_chain:
await sio.call("watch chain")
if watch_mempool:
await sio.call("watch mempool")
return sio
@sio.event
async def disconnect() -> None:
logger.info("closing socket connection")
if sio.connected:
await sio.disconnect()
async def get_wallet_connection(
url: str, api_key: str, wallet_id: str = "*",
) -> socketio.AsyncClient:
"""
see https://hsd-dev.org/guides/events.html
"""
assert type(url) == str
assert type(api_key) == str
assert type(wallet_id) == str
if sio.connected is False:
await sio.connect(url, transports=["websocket"])
await sio.call("auth", api_key)
await sio.call("join", wallet_id)
return sio
| 26.68
| 81
| 0.642429
| 0
| 0
| 0
| 0
| 140
| 0.104948
| 1,192
| 0.893553
| 232
| 0.173913
|
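A short usage sketch for the socket client above, assuming an hsd node is reachable locally; the URL, API key, and event name are placeholders rather than values taken from the package.

import asyncio
from handshake_client.sockets import get_connection

async def main():
    # placeholders: point these at your node's HTTP endpoint and API key
    sio = await get_connection("http://127.0.0.1:14037", "my-api-key")

    @sio.on("block connect")  # event name per the hsd events guide (assumption)
    async def on_block(*args):
        print("new block:", args)

    await asyncio.sleep(60)  # keep the connection open for a while
    await sio.disconnect()

asyncio.run(main())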
dd583f7d7e613589a62fb063aca89c71b3483ad8
| 5,768
|
py
|
Python
|
validation_tests/case_studies/towradgi_simulation/catchment_info.py
|
samcom12/anuga_core
|
f4378114dbf02d666fe6423de45798add5c42806
|
[
"Python-2.0",
"OLDAP-2.7"
] | 136
|
2015-05-07T05:47:43.000Z
|
2022-02-16T03:07:40.000Z
|
validation_tests/case_studies/towradgi_simulation/catchment_info.py
|
samcom12/anuga_core
|
f4378114dbf02d666fe6423de45798add5c42806
|
[
"Python-2.0",
"OLDAP-2.7"
] | 184
|
2015-05-03T09:27:54.000Z
|
2021-12-20T04:22:48.000Z
|
validation_tests/case_studies/towradgi_simulation/catchment_info.py
|
samcom12/anuga_core
|
f4378114dbf02d666fe6423de45798add5c42806
|
[
"Python-2.0",
"OLDAP-2.7"
] | 70
|
2015-03-18T07:35:22.000Z
|
2021-11-01T07:07:29.000Z
|
def create_catchment_list(simulation):
from os.path import join
channel_manning = simulation.args.channel_manning
CatchmentList = [
[join('Model', 'Bdy', 'Catchment.csv'), 100.0],
[join('Model', 'Bdy', 'FineCatchment.csv'), 36.0],
[join('Model', 'Bdy', 'CreekBanks.csv'), 8.0]
]
return CatchmentList
def create_manning_list(simulation):
from os.path import join
channel_manning = simulation.args.channel_manning
## IMPORTANT -- The ORDER in ManningList matters: When there is overlap,
## priority regions at BOTTOM
## FIXME: This setup can be done with fewer lines of code!
ManningList = [
[ join('Model', 'Mannings', '1.csv'),0.04], #park
[ join('Model', 'Mannings', '2.csv'),0.15],
[ join('Model', 'Mannings', '3.csv'),0.15],
[ join('Model', 'Mannings', '4.csv'),0.04],
[ join('Model', 'Mannings', '5.csv'),0.15],
[ join('Model', 'Mannings', '6.csv'),0.15],
[ join('Model', 'Mannings', '7.csv'),0.15],
[ join('Model', 'Mannings', '8.csv'),0.15],
[ join('Model', 'Mannings', '9.csv'),0.04], #park
[ join('Model', 'Mannings', '10.csv'), 0.15],
[ join('Model', 'Mannings', '11.csv'), 0.15],
[ join('Model', 'Mannings', '12.csv'), 0.15],
[ join('Model', 'Mannings', '13.csv'), 0.04],
[ join('Model', 'Mannings', '14.csv'), 0.15],
[ join('Model', 'Mannings', '15.csv'), 0.15],
[ join('Model', 'Mannings', '16.csv'), 0.15],
[ join('Model', 'Mannings', '17.csv'), 0.15],
[ join('Model', 'Mannings', '18.csv'), 0.045],
[ join('Model', 'Mannings', '18a.csv'), 0.15],
[ join('Model', 'Mannings', '18b.csv'), 0.15],
[ join('Model', 'Mannings', '18c.csv'), 0.15],
[ join('Model', 'Mannings', '18d.csv'), 0.15],
[ join('Model', 'Mannings', '18e.csv'), 0.08], #cokeworks site
[ join('Model', 'Mannings', '19.csv'), 0.15],
[ join('Model', 'Mannings', '20.csv'), 0.15],
[ join('Model', 'Mannings', '21.csv'), 0.15],
[ join('Model', 'Mannings', '22.csv'), 0.15],
[ join('Model', 'Mannings', '23.csv'), 0.15],
[ join('Model', 'Mannings', '24.csv'), 0.05],
[ join('Model', 'Mannings', '25.csv'), 0.15],
[ join('Model', 'Mannings', '26.csv'), 0.15],
[ join('Model', 'Mannings', '27.csv'), 0.15],
[ join('Model', 'Mannings', '28.csv'), 0.15],
[ join('Model', 'Mannings', '29.csv'), 0.15],
[ join('Model', 'Mannings', '30.csv'), 0.15],
[ join('Model', 'Mannings', '31.csv'), 0.15],
[ join('Model', 'Mannings', '32.csv'), 0.15],
[ join('Model', 'Mannings', '33.csv'), 0.15],
[ join('Model', 'Mannings', '34.csv'), 0.15],
[ join('Model', 'Mannings', '35.csv'), 0.15],
[ join('Model', 'Mannings', '36.csv'), 0.05],
[ join('Model', 'Mannings', '37.csv'), 0.15],
[ join('Model', 'Mannings', '38.csv'), 0.15],
[ join('Model', 'Mannings', '39.csv'), 0.15],
[ join('Model', 'Mannings', '40.csv'), 0.15],
[ join('Model', 'Mannings', '41.csv'), 0.15],
[ join('Model', 'Mannings', '42.csv'), 0.15],
[ join('Model', 'Mannings', '43.csv'), 0.15],
[ join('Model', 'Mannings', '44.csv'), 0.15],
[ join('Model', 'Mannings', '45.csv'), 0.15],
[ join('Model', 'Mannings', '46.csv'), 0.15],
[ join('Model', 'Mannings', '47.csv'), 0.15],
[ join('Model', 'Mannings', '48.csv'), 0.15],
[ join('Model', 'Mannings', '49.csv'), 0.15],
[ join('Model', 'Mannings', '50.csv'), 0.15],
[ join('Model', 'Mannings', '51.csv'), 0.15],
[ join('Model', 'Mannings', '52.csv'), 0.15],
[ join('Model', 'Mannings', '53.csv'), 0.15],
[ join('Model', 'Mannings', '54.csv'), 0.15],
[ join('Model', 'Mannings', '55.csv'), 0.15],
[ join('Model', 'Mannings', '56.csv'), 0.15],
[ join('Model', 'Mannings', '57.csv'), 0.15],
[ join('Model', 'Mannings', '58.csv'), 0.15],
[ join('Model', 'Mannings', '59.csv'), 0.08],
[ join('Model', 'Mannings', '60.csv'), 0.15],
[ join('Model', 'Mannings', '61.csv'), 0.08],
[ join('Model', 'Mannings', '62.csv'), 0.15],
[ join('Model', 'Mannings', '63.csv'), 0.08],
[ join('Model', 'Mannings', '64.csv'), 0.15],
[ join('Model', 'Mannings', '65.csv'), 0.15],
[ join('Model', 'Mannings', '66.csv'), 0.15],
[ join('Model', 'Mannings', '67.csv'), 0.15],
[ join('Model', 'Mannings', '68.csv'), 0.15],
[ join('Model', 'Mannings', '69.csv'), 0.15],
[ join('Model', 'Mannings', '70.csv'), 0.15],
[ join('Model', 'Mannings', '71.csv'), 0.05],
[ join('Model', 'Mannings', '72.csv'), 0.15],
[ join('Model', 'Mannings', '73.csv'), 0.15],
[ join('Model', 'Mannings', '74.csv'), 0.15],
[ join('Model', 'Mannings', '75.csv'), 0.15],
[ join('Model', 'Mannings', '76.csv'), 0.15],
[ join('Model', 'Mannings', '77.csv'), 0.07],
[ join('Model', 'Mannings', '78.csv'), 0.15],
[ join('Model', 'Mannings', '79.csv'), 0.15],
[ join('Model', 'Mannings', '80.csv'), 0.15],
[ join('Model', 'Mannings', '81.csv'), 0.15],
[ join('Model', 'Mannings', '82.csv'), 0.15],
[ join('Model', 'Mannings', '83.csv'), 0.15],
[ join('Model', 'Mannings', '84.csv'), 0.15],
[ join('Model', 'Mannings', '85.csv'), 0.15],
[ join('Model', 'Mannings', '86.csv'), 0.15],
[ join('Model', 'Mannings', 'Escarpement.csv'), 0.15],
[ join('Model', 'Mannings', 'Railway.csv'), 0.04],
[ join('Model', 'Creeks', 'creeks.csv'), channel_manning]
]
return ManningList
| 46.516129
| 76
| 0.495319
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,645
| 0.458564
|
dd586c3a691480974c3b96292cc74640fddadda5
| 869
|
py
|
Python
|
generator01/testing/test_generator01.py
|
sku899/World_Travel_Language_Wizard
|
a9e009336e2f53c5fc0f3e40af51f34335645e5f
|
[
"MIT"
] | null | null | null |
generator01/testing/test_generator01.py
|
sku899/World_Travel_Language_Wizard
|
a9e009336e2f53c5fc0f3e40af51f34335645e5f
|
[
"MIT"
] | null | null | null |
generator01/testing/test_generator01.py
|
sku899/World_Travel_Language_Wizard
|
a9e009336e2f53c5fc0f3e40af51f34335645e5f
|
[
"MIT"
] | null | null | null |
from unittest.mock import patch
from flask import url_for, Response, request
from flask_testing import TestCase
from random import randint
from app import app
class TestBase(TestCase):
def create_app(self):
return app
class TestResponse(TestBase):
def rand_country(self):
countries = ['German', 'Spanish', 'French', 'Russian', 'Chinese', 'Portuguese','Hindi','Arabic','Japanese', 'Korean']
response = self.client.get(url_for("random_generator"))
self.assertIn(countries[int(response.data)-1], countries)
def test_country(self):
with patch("requests.get") as g:
g.return_value.text = b"1"
response = self.client.get(url_for("random_generator"))
random_output = ['1','2','3','4','5','6','7','8','9','10']
self.assertIn(response.data.decode('utf-8'), random_output)
| 34.76
| 126
| 0.653625
| 705
| 0.811277
| 0
| 0
| 0
| 0
| 0
| 0
| 180
| 0.207135
|
dd5b35b49e23eb6c89bb23b5e7b7a0d158afacb3
| 14,640
|
py
|
Python
|
assets/arguments.py
|
YuhangSong/Arena-Baselines-Depreciated
|
78c33994e67aede7565dda3f68f5cebe0d5ee6e6
|
[
"Apache-2.0"
] | null | null | null |
assets/arguments.py
|
YuhangSong/Arena-Baselines-Depreciated
|
78c33994e67aede7565dda3f68f5cebe0d5ee6e6
|
[
"Apache-2.0"
] | null | null | null |
assets/arguments.py
|
YuhangSong/Arena-Baselines-Depreciated
|
78c33994e67aede7565dda3f68f5cebe0d5ee6e6
|
[
"Apache-2.0"
] | null | null | null |
import argparse
import torch
import assets.utils as utils
def log_args(args, tf_summary):
args_dict = args.__dict__
from pytablewriter import MarkdownTableWriter
writer = MarkdownTableWriter()
writer.table_name = "Configurations (Args)"
writer.headers = ["Parameter", "Value"]
print('# INFO: [{} start >>>>]'.format(writer.table_name))
writer.value_matrix = []
for key in args_dict.keys():
print('# INFO: [Config/Args][{} : {}]'.format(key, args_dict[key]))
writer.value_matrix += [[str(key), str(args_dict[key])]]
print('# INFO: [>>>> {} end]'.format(writer.table_name))
args_markdown_str = writer.dumps()
tf_summary.add_text(writer.table_name, args_markdown_str)
def generate_env_related_args(args, envs):
args.obs_shape = envs.observation_space.shape
args.action_space = envs.action_space
args.num_agents = envs.unwrapped.num_agents
if args.population_number == 1:
print('# INFO: baseline: self-play')
args.learning_agent_ids = [0]
elif args.population_number > 1:
print('# INFO: baseline: population-based training')
args.learning_agent_ids = range(args.num_agents)
if args.population_number < args.num_agents:
raise Exception(
'# ERROR: population_number should be at least the same as num_agents')
else:
raise Exception('# ERROR: invalid population_number')
return args
def get_args():
parser = argparse.ArgumentParser(description='RL')
'''general args'''
parser.add_argument('--mode', type=str, default='train',
help='\
[train: standard training]\
[vis_train: visualize training, using one process and full render]\
[pth2nn: convert pytorch .pth checkpoint to .nn checkpoint that can be used in unity editor]\
[eval_population: evaluate population performance]\
[eval_human: evaluate against human player]\
[eval_round: evaluate agent against agent]\
[scaler2fig: convert scalers logged in tensorboardX to fig]\
')
parser.add_argument('--env-name',
help='[general][environment to train on]')
parser.add_argument('--obs-type', default='visual',
help='[general][observation type: visual, ram]')
parser.add_argument('--num-env-steps', type=int, default=10e6,
help='[general][number of environment steps to train (default: 10e6)]')
parser.add_argument('--store-interval', type=int, default=int(60 * 10),
help='[general][save interval (in seconds)')
parser.add_argument('--log-dir', default='/tmp/gym/',
help='[general][directory to save agent logs (default: /tmp/gym)]')
parser.add_argument('--log-episode-every-minutes', type=float, default=20.0,
help='[general][log episode every x minutes]')
parser.add_argument('--seed', type=int, default=1,
help='[general][random seed (default: 1)]')
parser.add_argument('--cuda-deterministic', action='store_true', default=False,
help="[general][sets flags for determinism when using CUDA (potentially slow!)]")
parser.add_argument('--no-cuda', action='store_true', default=False,
help='[general][disables CUDA training]')
parser.add_argument('--num-eval-episodes', type=int, default=10,
help='[general][how many episodes to run for one evaluation]')
parser.add_argument('--arena-start-index', type=int, default=2394,
help='[general][each arena runs on a port, specify the ports to run the arena]')
parser.add_argument('--aux', type=str, default='',
help='[general][some aux information you may want to record along with this run]')
'''brain args'''
parser.add_argument('--add-timestep', action='store_true', default=False,
help='[brain][if add timestep to observations]')
parser.add_argument('--num-frame-stack', type=int, default=4,
help='[brain][num of stacked frames per observation]')
parser.add_argument('--recurrent-brain', action='store_true', default=False,
help='[brain][if use a recurrent policy]')
parser.add_argument('--normalize-obs', action='store_true', default=False,
help='[brain][if normalize observation with a running mean and variance]')
parser.add_argument('--batch-normalize', action='store_true', default=False,
help='[brain][if use batch normalize]')
parser.add_argument('--normalize-field', action='store_true', default=False,
help='[brain][C4NN][if normalize field]')
parser.add_argument('--normalize-kernal', action='store_true', default=False,
help='[brain][C4NN][if normalize kernal]')
parser.add_argument('--normalize-cross-coefficient', action='store_true', default=False,
help='[brain][C4NN][if normalize cross coefficient]')
parser.add_argument('--geographical-net', action='store_true', default=False,
help='[brain][GN][if use geographical network]')
'''trainer args'''
parser.add_argument('--trainer-id', default='a2c',
help='[trainer][trainer to use: a2c | ppo | acktr]')
parser.add_argument('--lr', type=float, default=7e-4,
help='[trainer][learning rate (default: 7e-4)]')
parser.add_argument('--eps', type=float, default=1e-5,
help='[trainer][RMSprop optimizer epsilon (default: 1e-5)]')
parser.add_argument('--alpha', type=float, default=0.99,
help='[trainer][RMSprop optimizer apha (default: 0.99)]')
parser.add_argument('--gamma', type=float, default=0.99,
help='[trainer][discount factor for rewards (default: 0.99)]')
parser.add_argument('--use-gae', action='store_true', default=False,
help='[trainer][use generalized advantage estimation]')
parser.add_argument('--tau', type=float, default=0.95,
help='[trainer][gae parameter (default: 0.95)]')
parser.add_argument('--entropy-coef', type=float, default=0.01,
help='[trainer][entropy term coefficient (default: 0.01)]')
parser.add_argument('--value-loss-coef', type=float, default=0.5,
help='[trainer][value loss coefficient (default: 0.5)]')
parser.add_argument('--max-grad-norm', type=float, default=0.5,
help='[trainer][max norm of gradients (default: 0.5)]')
parser.add_argument('--num-processes', type=int, default=16,
help='[trainer][how many training CPU processes to use (default: 16)]')
parser.add_argument('--num-steps', type=int, default=5,
help='[trainer][number of forward steps in A2C (default: 5)]')
parser.add_argument('--ppo-epoch', type=int, default=4,
help='[trainer][number of ppo epochs (default: 4)]')
parser.add_argument('--num-mini-batch', type=int, default=32,
help='[trainer][number of batches for ppo (default: 32)]')
parser.add_argument('--clip-param', type=float, default=0.2,
help='[trainer][ppo clip parameter (default: 0.2)]')
parser.add_argument('--use-linear-lr-decay', action='store_true', default=False,
help='[trainer][use a linear schedule on the learning rate]')
parser.add_argument('--use-linear-clip-decay', action='store_true', default=False,
help='[trainer][use a linear schedule on the ppo clipping parameter]')
'''multi-agent args'''
parser.add_argument('--population-number', type=int, default=1,
help='[multi-agent][number of agents in population train]')
parser.add_argument('--reload-agents-interval', type=int, default=(60 * 1),
help='[multi-agent][interval to reload agents (in seconds)]')
parser.add_argument('--reload-playing-agents-principle', type=str, default='OpenAIFive',
help='[multi-agent][principle of choosing a agents from historical checkpoints]\
[\
recent(the most recent checkpoint),\
uniform(uniformly sample from historical checkpoint),\
OpenAIFive(0.8 probability to be recent, 0.2 probability to be uniform)\
]')
parser.add_argument('--playing-agents-deterministic', action='store_false', default=True,
help='[eval][if playing agent act deterministically]')
'''eval args'''
parser.add_argument('--population-eval-start', type=int, default=0,
help='[eval][population-eval][when do population eval, start from x checkpoint]')
parser.add_argument('--population-eval-skip-interval', type=int, default=4,
help='[eval][population-eval][when do population eval, skip every x checkpoints]')
parser.add_argument('--learning-agents-deterministic', action='store_true', default=False,
help='[eval][if learning agent act deterministically]')
parser.add_argument('--record-screen', action='store_true', default=False,
help='[eval][if record the screen]')
parser.add_argument('--human-controled-agent-ids', type=str, default='',
help='set the list of agents (specified by its id) that is controlled by human, example: 1,2,4')
args = parser.parse_args()
args.log_dir = '../results/'
if (args.mode in ['vis_train']) or ('eval' in args.mode):
print('# WARNING: set num_processes to 1 for eval purpose')
args.num_processes = 1
args.num_mini_batch = 1
def add_to_log_dir(key_, value_):
args.log_dir = '{}__{}-{}'.format(
args.log_dir,
key_,
value_,
)
'''general'''
add_to_log_dir('en', args.env_name)
add_to_log_dir('ot', args.obs_type)
'''brain'''
add_to_log_dir('nfs', args.num_frame_stack)
add_to_log_dir('rb', args.recurrent_brain)
add_to_log_dir('no', args.normalize_obs)
add_to_log_dir('bn', args.batch_normalize)
add_to_log_dir('nf', args.normalize_field)
add_to_log_dir('nk', args.normalize_kernal)
add_to_log_dir('ncc', args.normalize_cross_coefficient)
add_to_log_dir('gn', args.geographical_net)
'''trainer'''
add_to_log_dir('ti', args.trainer_id)
'''multi-agent settings'''
add_to_log_dir('pn', args.population_number)
add_to_log_dir('rpap', args.reload_playing_agents_principle)
add_to_log_dir('pad', args.playing_agents_deterministic)
'''general'''
add_to_log_dir('a', args.aux)
'''generated args'''
if args.obs_type in ['visual']:
args.use_visual = True
elif args.obs_type in ['ram']:
args.use_visual = False
else:
raise Exception('# ERROR: obs_type is not supported')
if args.mode in ['vis_train']:
args.is_envs_train_mode = False
else:
args.is_envs_train_mode = True
if 'NoFrameskip' in args.env_name:
args.game_class = 'Atari'
else:
args.game_class = 'Arena'
args.cuda = not args.no_cuda and torch.cuda.is_available()
args.device = torch.device("cuda:0" if args.cuda else "cpu")
args.num_updates = int(
args.num_env_steps) // args.num_steps // args.num_processes
args.batch_size = args.num_processes * args.num_steps
args.mini_batch_size = args.batch_size // args.num_mini_batch
if args.trainer_id in ['ppo']:
args.use_clipped_value_loss = True
args.human_controled_agent_ids = args.human_controled_agent_ids.split(',')
_human_controled_agent_ids = []
for id in args.human_controled_agent_ids:
try:
_human_controled_agent_ids += [
int(id)
]
except Exception as e:
pass
args.human_controled_agent_ids = _human_controled_agent_ids
args.num_human_in_loop = len(args.human_controled_agent_ids)
args.is_human_in_loop = (args.num_human_in_loop > 0)
if args.is_human_in_loop:
input('# WARNING: human in loop, controling agent of id: {}'.format(
args.human_controled_agent_ids
))
args.is_shuffle_agents = False
# check configurations
if args.num_processes > 1:
input('# WARNING: only process 0 is controlled by human')
if (args.game_class in ['Arena']) and (args.is_envs_train_mode in [True]):
input('# WARNING: Arena env is running in train mode (faster and smaller), could be unsuitable for human in loop')
if args.num_human_in_loop > 1:
input('# WARNING: for now, only support one human in loop')
# init for human in loop
import pygame
pygame.init()
screen = pygame.display.set_mode((200, 150))
pygame.display.set_caption('Control Window')
else:
args.is_shuffle_agents = True
'''check args'''
assert args.trainer_id in ['a2c', 'ppo', 'acktr']
if args.recurrent_brain:
assert args.trainer_id in ['a2c', 'ppo'], \
'Recurrent policy is not implemented for ACKTR'
assert args.batch_size >= args.num_mini_batch, (
"PPO requires the number of processes ({}) "
"* number of steps ({}) = {} "
"to be greater than or equal to the number of PPO mini batches ({})."
"".format(args.num_processes, args.num_steps, args.num_processes * args.num_steps, args.num_mini_batch))
if args.recurrent_brain:
assert args.num_processes >= args.num_mini_batch, (
"PPO requires the number of processes ({}) "
"to be greater than or equal to the number of "
"PPO mini batches ({}).".format(args.num_processes, args.num_mini_batch))
'''prepare torch'''
torch.set_num_threads(1)
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
if args.cuda and torch.cuda.is_available() and args.cuda_deterministic:
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
return args
| 48.476821
| 126
| 0.616189
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 5,950
| 0.406421
|
dd5c3b4cdcb7e58a2c1873f564ec41c534d2da13
| 687
|
py
|
Python
|
khtube/download_ffmpeg.py
|
KodersHub/khtube
|
b1a8f96b7ff27cbb7eae615e8aee7d27260f80e8
|
[
"MIT"
] | 1
|
2021-08-09T14:01:12.000Z
|
2021-08-09T14:01:12.000Z
|
khtube/download_ffmpeg.py
|
KodersHub/khtube
|
b1a8f96b7ff27cbb7eae615e8aee7d27260f80e8
|
[
"MIT"
] | null | null | null |
khtube/download_ffmpeg.py
|
KodersHub/khtube
|
b1a8f96b7ff27cbb7eae615e8aee7d27260f80e8
|
[
"MIT"
] | null | null | null |
from google_drive_downloader import GoogleDriveDownloader as gdd
import sys
import os
import requests
def get_platform():
platforms = {
'linux1' : 'Linux',
'linux2' : 'Linux',
'darwin' : 'OS X',
'win32' : 'Windows'
}
if sys.platform not in platforms:
return sys.platform
return platforms[sys.platform]
platform = get_platform()
if platform in ("linux", "Linux"):  # handle both the legacy ('linux1'/'linux2') and modern ('linux') values
    print("Nothing needs to be installed")
else:
print("Installing ffmpeg")
gdd.download_file_from_google_drive(file_id='1Q5zbaXonPEUNQmclp1WMIVVodnUuJdKo',
dest_path='./ffmpeg.exe',
unzip=False)
| 25.444444
| 84
| 0.604076
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 161
| 0.234352
|
dd5ce8afa891dc4561f13cf8c918df7e99c18b1f
| 1,231
|
py
|
Python
|
climbing (1).py
|
VamsiKrishna1211/Hacker_rank_solutions
|
a683a36fcc2f011c120eb4d52aa08468deccc820
|
[
"Apache-2.0"
] | null | null | null |
climbing (1).py
|
VamsiKrishna1211/Hacker_rank_solutions
|
a683a36fcc2f011c120eb4d52aa08468deccc820
|
[
"Apache-2.0"
] | null | null | null |
climbing (1).py
|
VamsiKrishna1211/Hacker_rank_solutions
|
a683a36fcc2f011c120eb4d52aa08468deccc820
|
[
"Apache-2.0"
] | null | null | null |
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the climbingLeaderboard function below.
def climbingLeaderboard(scores, alice):
li=[]
lis=[0 for i in range(len(scores))]
lis[0]=1
for i in range(1,len(scores)):
#print(i)
if scores[i]<scores[i-1]:
lis[i]=lis[i-1]+1
else:
lis[i]=lis[i-1]
#print(lis)
num=len(scores)-1
for i in range(len(alice)):
lis.append(lis[len(lis)-1]+1)
scores.append(alice[i])
for k in range(num,-1,-1):
if scores[len(scores)-1]>=scores[k]:
lis[len(lis)-1]=lis[k]
else:
break;
num=k+1
li.append(lis[len(lis)-1])
scores.pop(len(scores)-1)
lis.pop(len(lis)-1)
return li
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
scores_count = int(input())
scores = list(map(int, input().rstrip().split()))
alice_count = int(input())
alice = list(map(int, input().rstrip().split()))
result = climbingLeaderboard(scores, alice)
fptr.write('\n'.join(map(str, result)))
fptr.write('\n')
fptr.close()
| 20.864407
| 53
| 0.541024
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 118
| 0.095857
|
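A small worked example of the dense-ranking function above (a typical sample case for this problem), assuming climbingLeaderboard is in scope; no OUTPUT_PATH or stdin plumbing is needed for this check.

scores = [100, 100, 50, 40, 40, 20, 10]    # leaderboard, non-increasing
alice = [5, 25, 50, 120]                   # Alice's scores, non-decreasing
print(climbingLeaderboard(scores, alice))  # expected: [6, 4, 2, 1]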
dd5d2da4c7eb58adfbaff7779a18bcc9d814e736
| 25,661
|
py
|
Python
|
game_manager/machine_learning/block_controller_train.py
|
EndoNrak/tetris
|
0ce4863348d644b401c53e6c9a50cdc6f7430ed1
|
[
"MIT"
] | 1
|
2022-01-29T15:23:15.000Z
|
2022-01-29T15:23:15.000Z
|
game_manager/machine_learning/block_controller_train.py
|
EndoNrak/tetris
|
0ce4863348d644b401c53e6c9a50cdc6f7430ed1
|
[
"MIT"
] | null | null | null |
game_manager/machine_learning/block_controller_train.py
|
EndoNrak/tetris
|
0ce4863348d644b401c53e6c9a50cdc6f7430ed1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
from datetime import datetime
import pprint
import random
import copy
import torch
import torch.nn as nn
from model.deepqnet import DeepQNetwork,DeepQNetwork_v2
import omegaconf
from hydra import compose, initialize
import os
from tensorboardX import SummaryWriter
from collections import deque
from random import random, sample,randint
import numpy as np
import subprocess
class Block_Controller(object):
# init parameter
board_backboard = 0
board_data_width = 0
board_data_height = 0
ShapeNone_index = 0
CurrentShape_class = 0
NextShape_class = 0
def __init__(self,load_weight=None):
# init parameter
self.mode = None
# train
self.init_train_parameter_flag = False
# predict
self.init_predict_parameter_flag = False
def set_parameter(self,weight=None):
cfg = self.yaml_read()
os.makedirs(cfg.common.dir,exist_ok=True)
self.saved_path = cfg.common.dir + "/" + cfg.common.weight_path
os.makedirs(self.saved_path ,exist_ok=True)
subprocess.run("cp config/default.yaml %s/"%(cfg.common.dir), shell=True)
self.writer = SummaryWriter(cfg.common.dir+"/"+cfg.common.log_path)
self.log = cfg.common.dir+"/log.txt"
self.log_score = cfg.common.dir+"/score.txt"
self.log_reward = cfg.common.dir+"/reward.txt"
self.state_dim = cfg.state.dim
with open(self.log,"w") as f:
print("start...", file=f)
with open(self.log_score,"w") as f:
print(0, file=f)
with open(self.log_reward,"w") as f:
print(0, file=f)
#=====Set tetris parameter=====
self.height = cfg.tetris.board_height
self.width = cfg.tetris.board_width
self.max_tetrominoes = cfg.tetris.max_tetrominoes
#=====load Deep Q Network=====
print("model name: %s"%(cfg.model.name))
if cfg.model.name=="DQN":
self.model = DeepQNetwork(self.state_dim)
self.initial_state = torch.FloatTensor([0 for i in range(self.state_dim)])
self.get_next_func = self.get_next_states
self.reward_func = self.step
elif cfg.model.name=="DQNv2":
self.model = DeepQNetwork_v2()
self.initial_state = torch.FloatTensor([[[0 for i in range(10)] for j in range(22)]])
self.get_next_func = self.get_next_states_v2
self.reward_func = self.step_v2
self.reward_weight = cfg.train.reward_weight
self.load_weight = cfg.common.load_weight
self.double_dqn = cfg.train.double_dqn
self.target_net = cfg.train.target_net
if self.double_dqn:
self.target_net = True
if self.target_net:
print("set target network...")
self.target_model = copy.deepcopy(self.model)
self.target_copy_intarval = cfg.train.target_copy_intarval
if self.mode=="predict":
if not weight==None:
print("load ",weight)
self.model = torch.load(weight)
self.model.eval()
else:
if not os.path.exists(self.load_weight):
                    print("%s does not exist!!"%(self.load_weight))
exit()
#self.model.load_state_dict(torch.load(self.load_weight))
self.model = torch.load(self.load_weight)
self.model.eval()
if torch.cuda.is_available():
self.model.cuda()
#=====Set hyper parameter=====
self.batch_size = cfg.train.batch_size
self.lr = cfg.train.lr
self.replay_memory_size = cfg.train.replay_memory_size
self.replay_memory = deque(maxlen=self.replay_memory_size)
self.num_decay_epochs = cfg.train.num_decay_epochs
self.num_epochs = cfg.train.num_epoch
self.initial_epsilon = cfg.train.initial_epsilon
self.final_epsilon = cfg.train.final_epsilon
self.save_interval = cfg.train.save_interval
#=====Set loss function and optimizer=====
if cfg.train.optimizer=="Adam" or cfg.train.optimizer=="ADAM":
self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr)
self.scheduler = None
else:
self.momentum =cfg.train.lr_momentum
self.optimizer = torch.optim.SGD(self.model.parameters(), lr=self.lr, momentum=self.momentum)
self.lr_step_size = cfg.train.lr_step_size
self.lr_gamma = cfg.train.lr_gamma
self.scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer, step_size=self.lr_step_size , gamma=self.lr_gamma)
self.criterion = nn.MSELoss()
#=====Initialize parameter=====
self.epoch = 0
self.score = 0
self.max_score = -99999
self.epoch_reward = 0
self.cleared_lines = 0
self.iter = 0
self.state = self.initial_state
self.tetrominoes = 0
self.gamma = cfg.train.gamma
self.reward_clipping = cfg.train.reward_clipping
self.score_list = cfg.tetris.score_list
self.reward_list = cfg.train.reward_list
self.penalty = self.reward_list[5]
if self.reward_clipping:
self.norm_num =max(max(self.reward_list),abs(self.penalty))
self.reward_list =[r/self.norm_num for r in self.reward_list]
self.penalty /= self.norm_num
self.penalty = min(cfg.train.max_penalty,self.penalty)
#=====Prioritized Experience Replay=====
self.prioritized_replay = cfg.train.prioritized_replay
if self.prioritized_replay:
from machine_learning.qlearning import PRIORITIZED_EXPERIENCE_REPLAY as PER
self.PER = PER(self.replay_memory_size,gamma=self.gamma)
    # update (train the network from the replay memory)
def update(self):
if self.mode=="train":
self.score += self.score_list[5]
self.replay_memory[-1][1] += self.penalty
            self.replay_memory[-1][3] = True  # mark the final transition as terminal (game over)
self.epoch_reward += self.penalty
if len(self.replay_memory) < self.replay_memory_size / 10:
print("================pass================")
            print("iter: {} , memory: {}/{} , score: {}, clear line: {}, block: {} ".format(self.iter,
len(self.replay_memory),self.replay_memory_size / 10,self.score,self.cleared_lines
,self.tetrominoes ))
else:
print("================update================")
self.epoch += 1
if self.prioritized_replay:
batch,replay_batch_index = self.PER.sampling(self.replay_memory,self.batch_size)
else:
batch = sample(self.replay_memory, min(len(self.replay_memory),self.batch_size))
state_batch, reward_batch, next_state_batch, done_batch = zip(*batch)
state_batch = torch.stack(tuple(state for state in state_batch))
reward_batch = torch.from_numpy(np.array(reward_batch, dtype=np.float32)[:, None])
next_state_batch = torch.stack(tuple(state for state in next_state_batch))
done_batch = torch.from_numpy(np.array(done_batch)[:, None])
#max_next_state_batch = torch.stack(tuple(state for state in max_next_state_batch))
q_values = self.model(state_batch)
if self.target_net:
if self.epoch %self.target_copy_intarval==0 and self.epoch>0:
print("target_net update...")
self.target_model = torch.load(self.max_weight)
#self.target_model = copy.copy(self.model)
#self.max_score = -99999
self.target_model.eval()
#======predict Q(S_t+1 max_a Q(s_(t+1),a))======
with torch.no_grad():
next_prediction_batch = self.target_model(next_state_batch)
else:
self.model.eval()
with torch.no_grad():
next_prediction_batch = self.model(next_state_batch)
self.model.train()
y_batch = torch.cat(
tuple(reward if done[0] else reward + self.gamma * prediction for done ,reward, prediction in
zip(done_batch,reward_batch, next_prediction_batch)))[:, None]
self.optimizer.zero_grad()
if self.prioritized_replay:
loss_weights = self.PER.update_priority(replay_batch_index,reward_batch,q_values,next_prediction_batch)
#print(loss_weights *nn.functional.mse_loss(q_values, y_batch))
loss = (loss_weights *self.criterion(q_values, y_batch)).mean()
#loss = self.criterion(q_values, y_batch)
loss.backward()
else:
loss = self.criterion(q_values, y_batch)
loss.backward()
self.optimizer.step()
if self.scheduler!=None:
self.scheduler.step()
log = "Epoch: {} / {}, Score: {}, block: {}, Reward: {:.1f} Cleared lines: {}".format(
self.epoch,
self.num_epochs,
self.score,
self.tetrominoes,
self.epoch_reward,
self.cleared_lines
)
print(log)
with open(self.log,"a") as f:
print(log, file=f)
with open(self.log_score,"a") as f:
print(self.score, file=f)
with open(self.log_reward,"a") as f:
print(self.epoch_reward, file=f)
self.writer.add_scalar('Train/Score', self.score, self.epoch - 1)
self.writer.add_scalar('Train/Reward', self.epoch_reward, self.epoch - 1)
self.writer.add_scalar('Train/block', self.tetrominoes, self.epoch - 1)
self.writer.add_scalar('Train/clear lines', self.cleared_lines, self.epoch - 1)
if self.epoch > self.num_epochs:
with open(self.log,"a") as f:
print("finish..", file=f)
exit()
else:
self.epoch += 1
log = "Epoch: {} / {}, Score: {}, block: {}, Reward: {:.1f} Cleared lines: {}".format(
self.epoch,
self.num_epochs,
self.score,
self.tetrominoes,
self.epoch_reward,
self.cleared_lines
)
pass
    # load parameters from the yaml config (via hydra)
def yaml_read(self):
initialize(config_path="../../config", job_name="tetris")
cfg = compose(config_name="default")
return cfg
    # reset the accumulated values (score, lines, reward, block count)
def reset_state(self):
if self.score > self.max_score:
torch.save(self.model, "{}/tetris_epoch_{}_score{}".format(self.saved_path,self.epoch,self.score))
self.max_score = self.score
self.max_weight = "{}/tetris_epoch_{}_score{}".format(self.saved_path,self.epoch,self.score)
self.state = self.initial_state
self.score = 0
self.cleared_lines = 0
self.epoch_reward = 0
self.tetrominoes = 0
    # count (and remove) the rows that are cleared
def check_cleared_rows(self,board):
board_new = np.copy(board)
lines = 0
empty_line = np.array([0 for i in range(self.width)])
for y in range(self.height - 1, -1, -1):
blockCount = np.sum(board[y])
if blockCount == self.width:
lines += 1
board_new = np.delete(board_new,y,0)
board_new = np.vstack([empty_line,board_new ])
return lines,board_new
    # compute per-column height differences (bumpiness) and total height
def get_bumpiness_and_height(self,board):
mask = board != 0
invert_heights = np.where(mask.any(axis=0), np.argmax(mask, axis=0), self.height)
heights = self.height - invert_heights
total_height = np.sum(heights)
currs = heights[:-1]
nexts = heights[1:]
diffs = np.abs(currs - nexts)
total_bumpiness = np.sum(diffs)
return total_bumpiness, total_height
    # count the number of holes in each column
def get_holes(self, board):
num_holes = 0
for i in range(self.width):
col = board[:,i]
row = 0
while row < self.height and col[row] == 0:
row += 1
num_holes += len([x for x in col[row + 1:] if x == 0])
return num_holes
#
def get_state_properties(self, board):
lines_cleared, board = self.check_cleared_rows(board)
holes = self.get_holes(board)
bumpiness, height = self.get_bumpiness_and_height(board)
return torch.FloatTensor([lines_cleared, holes, bumpiness, height])
def get_state_properties_v2(self, board):
lines_cleared, board = self.check_cleared_rows(board)
holes = self.get_holes(board)
bumpiness, height = self.get_bumpiness_and_height(board)
max_row = self.get_max_height(board)
return torch.FloatTensor([lines_cleared, holes, bumpiness, height,max_row])
def get_max_height(self, board):
sum_ = np.sum(board,axis=1)
row = 0
while row < self.height and sum_[row] ==0:
row += 1
return self.height - row
    # get the next candidate states (2-D board version)
def get_next_states_v2(self,curr_backboard,piece_id,CurrentShape_class):
states = {}
if piece_id == 5: # O piece
num_rotations = 1
elif piece_id == 1 or piece_id == 6 or piece_id == 7:
num_rotations = 2
else:
num_rotations = 4
for direction0 in range(num_rotations):
x0Min, x0Max = self.getSearchXRange(CurrentShape_class, direction0)
for x0 in range(x0Min, x0Max):
# get board data, as if dropdown block
board = self.getBoard(curr_backboard, CurrentShape_class, direction0, x0)
reshape_backboard = self.get_reshape_backboard(board)
reshape_backboard = torch.from_numpy(reshape_backboard[np.newaxis,:,:]).float()
states[(x0, direction0)] = reshape_backboard
return states
    # get the next candidate states (1-D feature version)
def get_next_states(self,curr_backboard,piece_id,CurrentShape_class):
states = {}
if piece_id == 5: # O piece
num_rotations = 1
elif piece_id == 1 or piece_id == 6 or piece_id == 7:
num_rotations = 2
else:
num_rotations = 4
for direction0 in range(num_rotations):
x0Min, x0Max = self.getSearchXRange(CurrentShape_class, direction0)
for x0 in range(x0Min, x0Max):
# get board data, as if dropdown block
board = self.getBoard(curr_backboard, CurrentShape_class, direction0, x0)
board = self.get_reshape_backboard(board)
states[(x0, direction0)] = self.get_state_properties(board)
return states
    # reshape the flat board into a 2-D binary array
def get_reshape_backboard(self,board):
board = np.array(board)
reshape_board = board.reshape(self.height,self.width)
reshape_board = np.where(reshape_board>0,1,0)
return reshape_board
    # compute the reward (2-D board version)
def step_v2(self, curr_backboard,action,curr_shape_class):
x0, direction0 = action
board = self.getBoard(curr_backboard, curr_shape_class, direction0, x0)
board = self.get_reshape_backboard(board)
bampiness,height = self.get_bumpiness_and_height(board)
max_height = self.get_max_height(board)
hole_num = self.get_holes(board)
lines_cleared, board = self.check_cleared_rows(board)
reward = self.reward_list[lines_cleared]
reward -= self.reward_weight[0] *bampiness
reward -= self.reward_weight[1] * max(0,max_height-(self.height/2))
reward -= self.reward_weight[2] * hole_num
self.epoch_reward += reward
self.score += self.score_list[lines_cleared]
self.cleared_lines += lines_cleared
self.tetrominoes += 1
return reward
    # compute the reward (1-D feature version)
def step(self, curr_backboard,action,curr_shape_class):
x0, direction0 = action
board = self.getBoard(curr_backboard, curr_shape_class, direction0, x0)
board = self.get_reshape_backboard(board)
lines_cleared, board = self.check_cleared_rows(board)
reward = self.reward_list[lines_cleared]
self.epoch_reward += reward
self.score += self.score_list[lines_cleared]
self.cleared_lines += lines_cleared
self.tetrominoes += 1
return reward
def GetNextMove(self, nextMove, GameStatus,weight=None):
t1 = datetime.now()
self.mode = GameStatus["judge_info"]["mode"]
if self.init_train_parameter_flag == False:
self.init_train_parameter_flag = True
self.set_parameter(weight=weight)
self.ind =GameStatus["block_info"]["currentShape"]["index"]
curr_backboard = GameStatus["field_info"]["backboard"]
# default board definition
self.board_data_width = GameStatus["field_info"]["width"]
self.board_data_height = GameStatus["field_info"]["height"]
curr_shape_class = GameStatus["block_info"]["currentShape"]["class"]
next_shape_class= GameStatus["block_info"]["nextShape"]["class"]
# next shape info
self.ShapeNone_index = GameStatus["debug_info"]["shape_info"]["shapeNone"]["index"]
curr_piece_id =GameStatus["block_info"]["currentShape"]["index"]
next_piece_id =GameStatus["block_info"]["nextShape"]["index"]
reshape_backboard = self.get_reshape_backboard(curr_backboard)
#self.state = reshape_backboard
next_steps =self.get_next_func(curr_backboard,curr_piece_id,curr_shape_class)
if self.mode == "train":
# init parameter
epsilon = self.final_epsilon + (max(self.num_decay_epochs - self.epoch, 0) * (
self.initial_epsilon - self.final_epsilon) / self.num_decay_epochs)
u = random()
random_action = u <= epsilon
next_actions, next_states = zip(*next_steps.items())
next_states = torch.stack(next_states)
if torch.cuda.is_available():
next_states = next_states.cuda()
self.model.train()
with torch.no_grad():
predictions = self.model(next_states)[:, 0]
if random_action:
index = randint(0, len(next_steps) - 1)
else:
index = torch.argmax(predictions).item()
next_state = next_states[index, :]
action = next_actions[index]
reward = self.reward_func(curr_backboard,action,curr_shape_class)
done = False #game over flag
#======predict max_a Q(s_(t+1),a)======
#if use double dqn, predicted by main model
if self.double_dqn:
next_backboard = self.getBoard(curr_backboard, curr_shape_class, action[1], action[0])
next2_steps =self.get_next_func(next_backboard,next_piece_id,next_shape_class)
next2_actions, next2_states = zip(*next2_steps.items())
next2_states = torch.stack(next2_states)
if torch.cuda.is_available():
next2_states = next2_states.cuda()
self.model.train()
with torch.no_grad():
next_predictions = self.model(next2_states)[:, 0]
next_index = torch.argmax(next_predictions).item()
next2_state = next2_states[next_index, :]
#if use target net, predicted by target model
elif self.target_net:
next_backboard = self.getBoard(curr_backboard, curr_shape_class, action[1], action[0])
next2_steps =self.get_next_func(next_backboard,next_piece_id,next_shape_class)
next2_actions, next2_states = zip(*next2_steps.items())
next2_states = torch.stack(next2_states)
if torch.cuda.is_available():
next2_states = next2_states.cuda()
self.target_model.train()
with torch.no_grad():
next_predictions = self.target_model(next2_states)[:, 0]
next_index = torch.argmax(next_predictions).item()
next2_state = next2_states[next_index, :]
#if not use target net,predicted by main model
else:
next_backboard = self.getBoard(curr_backboard, curr_shape_class, action[1], action[0])
next2_steps =self.get_next_func(next_backboard,next_piece_id,next_shape_class)
next2_actions, next2_states = zip(*next2_steps.items())
next2_states = torch.stack(next2_states)
if torch.cuda.is_available():
next2_states = next2_states.cuda()
self.model.train()
with torch.no_grad():
next_predictions = self.model(next2_states)[:, 0]
epsilon = self.final_epsilon + (max(self.num_decay_epochs - self.epoch, 0) * (
self.initial_epsilon - self.final_epsilon) / self.num_decay_epochs)
u = random()
random_action = u <= epsilon
if random_action:
next_index = randint(0, len(next2_steps) - 1)
else:
next_index = torch.argmax(next_predictions).item()
next2_state = next2_states[next_index, :]
#=======================================
self.replay_memory.append([next_state, reward, next2_state,done])
if self.prioritized_replay:
self.PER.store()
#self.replay_memory.append([self.state, reward, next_state,done])
nextMove["strategy"]["direction"] = action[1]
nextMove["strategy"]["x"] = action[0]
nextMove["strategy"]["y_operation"] = 1
nextMove["strategy"]["y_moveblocknum"] = 1
self.state = next_state
elif self.mode == "predict":
self.model.eval()
next_actions, next_states = zip(*next_steps.items())
next_states = torch.stack(next_states)
predictions = self.model(next_states)[:, 0]
index = torch.argmax(predictions).item()
action = next_actions[index]
nextMove["strategy"]["direction"] = action[1]
nextMove["strategy"]["x"] = action[0]
nextMove["strategy"]["y_operation"] = 1
nextMove["strategy"]["y_moveblocknum"] = 1
return nextMove
def getSearchXRange(self, Shape_class, direction):
#
# get x range from shape direction.
#
minX, maxX, _, _ = Shape_class.getBoundingOffsets(direction) # get shape x offsets[minX,maxX] as relative value.
xMin = -1 * minX
xMax = self.board_data_width - maxX
return xMin, xMax
def getShapeCoordArray(self, Shape_class, direction, x, y):
#
# get coordinate array by given shape.
#
coordArray = Shape_class.getCoords(direction, x, y) # get array from shape direction, x, y.
return coordArray
def getBoard(self, board_backboard, Shape_class, direction, x):
#
# get new board.
#
# copy backboard data to make new board.
# if not, original backboard data will be updated later.
board = copy.deepcopy(board_backboard)
_board = self.dropDown(board, Shape_class, direction, x)
return _board
def dropDown(self, board, Shape_class, direction, x):
#
# internal function of getBoard.
# -- drop down the shape on the board.
#
dy = self.board_data_height - 1
coordArray = self.getShapeCoordArray(Shape_class, direction, x, 0)
# update dy
for _x, _y in coordArray:
_yy = 0
while _yy + _y < self.board_data_height and (_yy + _y < 0 or board[(_y + _yy) * self.board_data_width + _x] == self.ShapeNone_index):
_yy += 1
_yy -= 1
if _yy < dy:
dy = _yy
# get new board
_board = self.dropDownWithDy(board, Shape_class, direction, x, dy)
return _board
def dropDownWithDy(self, board, Shape_class, direction, x, dy):
#
# internal function of dropDown.
#
_board = board
coordArray = self.getShapeCoordArray(Shape_class, direction, x, 0)
for _x, _y in coordArray:
_board[(_y + dy) * self.board_data_width + _x] = Shape_class.shape
return _board
BLOCK_CONTROLLER_TRAIN = Block_Controller()
| 41.929739
| 145
| 0.575465
| 25,395
| 0.981981
| 0
| 0
| 0
| 0
| 0
| 0
| 3,018
| 0.116701
|
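The core of the update() method above is the one-step Q-learning target: y = r for a terminal transition, otherwise y = r + gamma * max_a' Q(s', a'). Below is a minimal self-contained sketch of that computation with toy values; it is not the author's exact code (which builds the same target with a tuple comprehension over the batch).

import torch
import torch.nn as nn

gamma = 0.99
reward = torch.tensor([[1.0], [0.5]])
done = torch.tensor([[False], [True]])
next_q = torch.tensor([[2.0], [3.0]])                        # stands in for target_model(next_state_batch)
q_values = torch.tensor([[1.5], [0.4]], requires_grad=True)  # stands in for model(state_batch)

# y = r for terminal transitions, r + gamma * Q(s', a*) otherwise
target = torch.where(done, reward, reward + gamma * next_q)
loss = nn.MSELoss()(q_values, target)
loss.backward()
print(loss.item())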
dd5da8896dcd0d60de2ab7a1e52c8732ee3a4ea8
| 405
|
py
|
Python
|
erp/migrations/0123_auto_20210513_1720.py
|
bzg/acceslibre
|
52c7c6990dc132da71a92e856d65f4a983c3b15a
|
[
"MIT"
] | 8
|
2020-07-23T08:17:28.000Z
|
2022-03-09T22:31:36.000Z
|
erp/migrations/0123_auto_20210513_1720.py
|
bzg/acceslibre
|
52c7c6990dc132da71a92e856d65f4a983c3b15a
|
[
"MIT"
] | 37
|
2020-07-01T08:47:33.000Z
|
2022-02-03T19:50:58.000Z
|
erp/migrations/0123_auto_20210513_1720.py
|
bzg/acceslibre
|
52c7c6990dc132da71a92e856d65f4a983c3b15a
|
[
"MIT"
] | 4
|
2021-04-08T10:57:18.000Z
|
2022-01-31T13:16:31.000Z
|
# Generated by Django 3.2 on 2021-05-13 15:20
import uuid
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("erp", "0122_auto_20210513_1720"),
]
operations = [
migrations.AlterField(
model_name="erp",
name="uuid",
field=models.UUIDField(default=uuid.uuid4, unique=True),
),
]
| 20.25
| 68
| 0.604938
| 302
| 0.745679
| 0
| 0
| 0
| 0
| 0
| 0
| 86
| 0.212346
|
dd5e767c2f0f3137c08350fa015010541f736e41
| 290
|
py
|
Python
|
migrations/824-auto-exclude-regions.py
|
muffinresearch/zamboni
|
045a6f07c775b99672af6d9857d295ed02fe5dd9
|
[
"BSD-3-Clause"
] | null | null | null |
migrations/824-auto-exclude-regions.py
|
muffinresearch/zamboni
|
045a6f07c775b99672af6d9857d295ed02fe5dd9
|
[
"BSD-3-Clause"
] | null | null | null |
migrations/824-auto-exclude-regions.py
|
muffinresearch/zamboni
|
045a6f07c775b99672af6d9857d295ed02fe5dd9
|
[
"BSD-3-Clause"
] | null | null | null |
from mkt.constants import regions
from mkt.developers.cron import exclude_new_region
def run():
exclude_new_region([
regions.CR,
regions.EC,
regions.FR,
regions.GT,
regions.IT,
regions.NI,
regions.PA,
regions.SV,
])
| 18.125
| 50
| 0.586207
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
dd5ec06ae412be00165dc082fa38f505f00c44d7
| 2,959
|
py
|
Python
|
qa/rpc-tests/checkpoint-load.py
|
ericramos1980/energi
|
aadc44f714f9d52433ab3595a9f33a61433c60c9
|
[
"MIT"
] | 2
|
2021-12-28T21:47:07.000Z
|
2022-02-09T21:04:29.000Z
|
qa/rpc-tests/checkpoint-load.py
|
reddragon34/energi
|
4cc6c426d9d4b6b9053912de9b2197eba071201e
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/checkpoint-load.py
|
reddragon34/energi
|
4cc6c426d9d4b6b9053912de9b2197eba071201e
|
[
"MIT"
] | 1
|
2019-10-07T19:17:55.000Z
|
2019-10-07T19:17:55.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2019 The Energi Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import logging
class CheckpointLoadTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 3
node_args = ["-keypool=10", "-debug=stake", "-debug=net",
"-addcheckpoint=10:abcdef01234456789", "-checkpoints=0"]
self.extra_args = [node_args, node_args, node_args]
self.node_args = node_args
def setup_network(self, split=False):
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, self.extra_args)
connect_nodes_bi(self.nodes, 0, 1)
connect_nodes_bi(self.nodes, 0, 2)
connect_nodes_bi(self.nodes, 1, 2)
self.is_network_split=False
def run_test(self):
self.sync_all()
logging.info("Generating initial blockchain")
self.nodes[0].generate(20)
self.sync_all()
assert_equal(self.nodes[0].getinfo()['blocks'], 20)
logging.info("Enabling checkpoints")
stop_nodes(self.nodes)
node_args = list(self.node_args)
node_args[-1] = "-checkpoints=1"
self.extra_args[0] = node_args;
self.setup_network()
sync_blocks(self.nodes[1:])
assert_equal(self.nodes[0].getinfo()['blocks'], 9)
assert_equal(self.nodes[1].getinfo()['blocks'], 20)
assert_equal(self.nodes[2].getinfo()['blocks'], 20)
logging.info("Adding more blocks")
self.nodes[1].generate(3)
sync_blocks(self.nodes[1:])
assert_equal(self.nodes[0].getinfo()['blocks'], 9)
assert_equal(self.nodes[1].getinfo()['blocks'], 23)
assert_equal(self.nodes[2].getinfo()['blocks'], 23)
logging.info("Adding more block on alt chain")
stop_nodes(self.nodes)
self.extra_args[0] = self.node_args
self.nodes = start_nodes(1, self.options.tmpdir, self.extra_args)
self.nodes[0].generate(30)
stop_nodes(self.nodes)
self.setup_network()
self.sync_all()
assert_equal(self.nodes[0].getinfo()['blocks'], 39)
assert_equal(self.nodes[1].getinfo()['blocks'], 39)
assert_equal(self.nodes[2].getinfo()['blocks'], 39)
logging.info("Restart to check no issues appear")
stop_nodes(self.nodes)
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, self.extra_args)
self.sync_all()
assert_equal(self.nodes[0].getinfo()['blocks'], 39)
assert_equal(self.nodes[1].getinfo()['blocks'], 39)
assert_equal(self.nodes[2].getinfo()['blocks'], 39)
if __name__ == '__main__':
CheckpointLoadTest().main()
| 37.935897
| 86
| 0.639743
| 2,576
| 0.870564
| 0
| 0
| 0
| 0
| 0
| 0
| 566
| 0.191281
|
dd5ffb792de44849ba525e817187b550fe21e9d9
| 648
|
py
|
Python
|
python/setup.py
|
tcolgate/gracetests
|
552c8113b0554d49cf146e6d7cfd573c8b4cbf8f
|
[
"MIT"
] | 2
|
2019-07-30T16:50:20.000Z
|
2021-11-26T22:46:29.000Z
|
python/setup.py
|
tcolgate/gracetests
|
552c8113b0554d49cf146e6d7cfd573c8b4cbf8f
|
[
"MIT"
] | null | null | null |
python/setup.py
|
tcolgate/gracetests
|
552c8113b0554d49cf146e6d7cfd573c8b4cbf8f
|
[
"MIT"
] | 1
|
2019-07-30T16:50:54.000Z
|
2019-07-30T16:50:54.000Z
|
import os
from setuptools import find_packages, setup
DIR = os.path.dirname(os.path.abspath(__file__))
setup(
name='graceful',
version='1.2.0',
description='test of graceful shutdown',
url='https://github.com/qubitdigital/graceful/python',
author='Infra',
author_email='infra@qubit.com',
license='All rights reserved.',
packages=find_packages(),
install_requires=[
'sanic==0.7.0',
'ujson==1.35',
'python-dotenv==0.8.2',
'cchardet==2.1.1',
],
zip_safe=False,
entry_points={
'console_scripts': [
'graceful=graceful.server:main',
]
}
)
| 22.344828
| 58
| 0.603395
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 253
| 0.390432
|
dd61bcf996578639e6980380d244d98682bbdae6
| 812
|
py
|
Python
|
src/mylog/logger.py
|
JoeBuzh/Pm_Composition_Quallity_Control
|
676c508a37c9a3774bf223e7abd53db04df771d7
|
[
"MIT"
] | 1
|
2020-07-23T04:13:02.000Z
|
2020-07-23T04:13:02.000Z
|
src/runtime/mylog/mylog.py
|
JoeBuzh/DeepWeather
|
2677edc16d9865ec98401aaf121aaabd24974aaf
|
[
"MIT"
] | null | null | null |
src/runtime/mylog/mylog.py
|
JoeBuzh/DeepWeather
|
2677edc16d9865ec98401aaf121aaabd24974aaf
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
import logging
import logging.handlers
import time
def init_logger(log_file):
dir_path = os.path.dirname(log_file)
try:
if not os.path.exists(dir_path):
os.makedirs(dir_path)
except Exception as e:
pass
handler = logging.handlers.RotatingFileHandler(log_file, maxBytes=30 * 1024 * 1024, backupCount=10)
fmt = '%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s'
formatter = logging.Formatter(fmt)
handler.setFormatter(formatter)
#logger_instance = logging.getLogger('logs')
logger_instance = logging.getLogger(log_file.split("/")[-1])
logger_instance.addHandler(handler)
#logger_instance.setLevel(logging.DEBUG)
logger_instance.setLevel(logging.INFO)
return logger_instance
| 31.230769
| 103
| 0.699507
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 179
| 0.220443
|
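A short usage sketch for the rotating-file logger factory above; the log path is a placeholder, and rotation follows the 30 MB / 10-backup settings hard-coded in init_logger.

# assumes init_logger from the module above is importable
logger = init_logger("/tmp/mylogs/app.log")  # hypothetical path; parent dirs are created on demand
logger.info("pipeline started")
logger.warning("low disk space")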
dd61ede10dd7a8e91db98cff1eeb2bd9cfadde8d
| 637
|
py
|
Python
|
convert_assets.py
|
michaelgold/usdzconvert
|
f4e6e552db4e27a3e088649f19f6bd61977501c1
|
[
"MIT"
] | null | null | null |
convert_assets.py
|
michaelgold/usdzconvert
|
f4e6e552db4e27a3e088649f19f6bd61977501c1
|
[
"MIT"
] | null | null | null |
convert_assets.py
|
michaelgold/usdzconvert
|
f4e6e552db4e27a3e088649f19f6bd61977501c1
|
[
"MIT"
] | null | null | null |
import glob
import os
import subprocess
import shutil
source_file_list = glob.glob("../source/assets/*.glb")
for input_file_name in source_file_list:
base_file_name = os.path.split(input_file_name)[1]
output_file_name = "../dist/assets/{}.usdz".format(os.path.splitext(base_file_name)[0])
print(output_file_name)
subprocess.call("python run_usd.py usdzconvert/usdzconvert {} {}".format(input_file_name, output_file_name), shell=True)
for glb_file in source_file_list:
print(glb_file)
destination = "../dist/assets/{}".format(os.path.split(glb_file)[1])
shutil.move(glb_file, destination)
| 35.388889
| 125
| 0.726845
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 116
| 0.182104
|
dd6374321202c04d2e14fb1f005b96daccd1a28b
| 36
|
py
|
Python
|
csv_json/__init__.py
|
Rerice/Csv_json_converter
|
54bf0f839d8fff203c1cf96812b96af915449fef
|
[
"MIT"
] | 3
|
2021-06-02T15:32:45.000Z
|
2021-12-13T10:12:25.000Z
|
csv_json/__init__.py
|
Rerice/Csv_json_converter
|
54bf0f839d8fff203c1cf96812b96af915449fef
|
[
"MIT"
] | null | null | null |
csv_json/__init__.py
|
Rerice/Csv_json_converter
|
54bf0f839d8fff203c1cf96812b96af915449fef
|
[
"MIT"
] | 2
|
2021-06-02T15:30:42.000Z
|
2021-06-02T17:28:41.000Z
|
from csv_json.csv_json_conv import *
| 36
| 36
| 0.861111
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
dd63b219f3cb356db869fd14f9d34da5dd98361a
| 463
|
py
|
Python
|
Constants.py
|
HKN-UCSD/HIIT
|
63f726398ca63c92e82a8052a05608a8537e6e87
|
[
"MIT"
] | null | null | null |
Constants.py
|
HKN-UCSD/HIIT
|
63f726398ca63c92e82a8052a05608a8537e6e87
|
[
"MIT"
] | null | null | null |
Constants.py
|
HKN-UCSD/HIIT
|
63f726398ca63c92e82a8052a05608a8537e6e87
|
[
"MIT"
] | 1
|
2019-08-18T19:23:44.000Z
|
2019-08-18T19:23:44.000Z
|
COLUMNS = ['First Name', 'Last Name', 'Class Standing', 'Cum GPA', 'Major Code', 'Dept', 'Email']
DEPTS = ['CSE', 'ECE', 'MATH', 'BENG']
CLASS_STANDINGS = ['SO', 'JR', 'SR']
DEPTS_MAJORS = dict() # bit of a faux-pas...
DEPTS_MAJORS['CSE'] = ['CS25', 'CS26', 'CS27', 'CS28']
DEPTS_MAJORS['ECE'] = ['EC26', 'EC27', 'EC28']
DEPTS_MAJORS['MATH'] = ['MA30']
DEPTS_MAJORS['BENG'] = ['BE25', 'BE26', 'BE27', 'BE28']
CLASS_QUANTILE = {'SO': 0.8, 'JR': 0.75, 'SR': 0.667}
| 51.444444
| 97
| 0.580994
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 235
| 0.507559
|
dd6453c36feaa6225a2781434ed2cbc21f1477e2
| 258
|
py
|
Python
|
app/admin.py
|
Syilun/aiot-django-dashboard-postgresql
|
3aa607bd623006a4c99a97da304985eb908741c8
|
[
"MIT"
] | null | null | null |
app/admin.py
|
Syilun/aiot-django-dashboard-postgresql
|
3aa607bd623006a4c99a97da304985eb908741c8
|
[
"MIT"
] | null | null | null |
app/admin.py
|
Syilun/aiot-django-dashboard-postgresql
|
3aa607bd623006a4c99a97da304985eb908741c8
|
[
"MIT"
] | null | null | null |
# -*- encoding: utf-8 -*-
"""
License: MIT
Copyright (c) 2019 - present AppSeed.us
"""
from django.contrib import admin
from .models import PERSON
from .models import FACE
# Register your models here.
admin.site.register(PERSON)
admin.site.register(FACE)
| 17.2
| 39
| 0.728682
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 113
| 0.437984
|
dd646968d684c4605ba1ae783d48a3452bbee81d
| 933
|
py
|
Python
|
Python/python-tutorials/networking/tcp_server.py
|
zhongyangynag/code-study
|
5410929554107a384a09d899c6fa3d16ed383d2b
|
[
"MIT"
] | null | null | null |
Python/python-tutorials/networking/tcp_server.py
|
zhongyangynag/code-study
|
5410929554107a384a09d899c6fa3d16ed383d2b
|
[
"MIT"
] | null | null | null |
Python/python-tutorials/networking/tcp_server.py
|
zhongyangynag/code-study
|
5410929554107a384a09d899c6fa3d16ed383d2b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import re
import sys
import socket
import SocketServer
import struct
import fcntl
def getip(ethname):
if ethname=="":
ethname="eth0"
try:
s=socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
ip=socket.inet_ntoa(fcntl.ioctl(s.fileno(), 0X8915, struct.pack('256s', ethname[:15]))[20:24])
except:
ip=""
return ip
class MyHandler(SocketServer.BaseRequestHandler):
def setup(self):
self.allow_reuse_address=True
return SocketServer.BaseRequestHandler.setup(self)
def finish(self):
return SocketServer.BaseRequestHandler.finish(self)
def handle(self):
#size=self.request.recv(4)
buf=self.request.recv(2048).replace('\n','')
print "Recv from %s: %s" %(self.client_address[0], buf)
# send back message
self.request.sendall(buf)
if __name__=='__main__':
# start server
server = SocketServer.ThreadingTCPServer( (getip("eth0"),44444), MyHandler)
server.serve_forever()
| 21.697674
| 96
| 0.736334
| 431
| 0.461951
| 0
| 0
| 0
| 0
| 0
| 0
| 136
| 0.145766
|
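A minimal Python 3 client sketch that exercises the echo server above; the host is a placeholder for the server machine's eth0 address and the port mirrors the hard-coded 44444.

import socket

HOST = "192.0.2.10"  # placeholder: replace with the server's eth0 address
PORT = 44444

with socket.create_connection((HOST, PORT)) as s:
    s.sendall(b"hello\n")
    reply = s.recv(2048)
    print(reply)  # the server strips the newline and echoes b"hello" back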
dd64daf0644c28687a4705d4e8b356d44e031ab4
| 2,190
|
py
|
Python
|
tests/test_examples.py
|
timgates42/goless
|
3c8742fa0f94d0a365840aae404da4e8eaed9d71
|
[
"Apache-2.0"
] | 266
|
2015-01-03T04:18:48.000Z
|
2022-02-16T03:08:38.000Z
|
tests/test_examples.py
|
timgates42/goless
|
3c8742fa0f94d0a365840aae404da4e8eaed9d71
|
[
"Apache-2.0"
] | 19
|
2015-03-06T11:04:53.000Z
|
2021-06-09T15:08:57.000Z
|
tests/test_examples.py
|
timgates42/goless
|
3c8742fa0f94d0a365840aae404da4e8eaed9d71
|
[
"Apache-2.0"
] | 20
|
2015-01-03T03:45:08.000Z
|
2022-03-05T06:05:32.000Z
|
"""
Idiomatic Go examples converted to use goless.
"""
from __future__ import print_function
import time
from . import BaseTests
import goless
class Examples(BaseTests):
def test_select(self):
# https://gobyexample.com/select
c1 = goless.chan()
c2 = goless.chan()
def func1():
time.sleep(.1)
c1.send('one')
goless.go(func1)
def func2():
time.sleep(.2)
c2.send('two')
goless.go(func2)
# We don't print since we run this as a test.
callbacks = []
for i in range(2):
_, val = goless.select([goless.rcase(c1), goless.rcase(c2)])
callbacks.append(val)
self.assertEqual(callbacks, ['one', 'two'])
def test_range_over_channels(self):
# https://gobyexample.com/range-over-channels
queue = goless.chan(2)
queue.send('one')
queue.send('two')
queue.close()
elements = [elem for elem in queue]
self.assertEqual(elements, ['one', 'two'])
def test_worker_pool(self):
# https://gobyexample.com/worker-pools
jobs_done = []
# noinspection PyShadowingNames,PyShadowingBuiltins
def worker(id, jobs, results):
for j in jobs:
jobs_done.append('w %s j %s' % (id, j))
time.sleep(.01)
results.send(j * 2)
jobs = goless.chan(100)
results = goless.chan(100)
        for w in range(1, 4):
            # Bind w as a default argument so each goroutine keeps its own worker id
            # (a bare lambda would capture the loop variable by reference).
            goless.go(lambda w=w: worker(w, jobs, results))
for j in range(1, 10):
jobs.send(j)
jobs.close()
for a in range(1, 10):
results.recv()
self.assertEqual(len(jobs_done), 9)
def test_case_switch(self):
chan = goless.chan()
cases = [goless.rcase(chan), goless.scase(chan, 1), goless.dcase()]
chosen, value = goless.select(cases)
if chosen is cases[0]:
print('Received %s' % value)
elif chosen is cases[1]:
assert value is None
print('Sent.')
else:
assert chosen is cases[2], chosen
print('Default...')
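The three tests above mirror the gobyexample recipes one-to-one. For readers unfamiliar with the library, the snippet below distils the same channel API into a minimal producer/consumer outside the test harness; it is a sketch based only on the goless calls already exercised in this file.

# Minimal sketch using only the goless calls used above (chan, go, send, recv via iteration, close).
import goless

def produce(ch):
    for i in range(3):
        ch.send(i)          # blocks until a receiver is ready (unbuffered channel)
    ch.close()              # lets the consumer's iteration terminate

ch = goless.chan()          # unbuffered channel, as in test_select
goless.go(lambda: produce(ch))
received = [value for value in ch]   # iteration stops when the channel is closed
assert received == [0, 1, 2]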
| 26.071429
| 75
| 0.541096
| 2,042
| 0.93242
| 0
| 0
| 0
| 0
| 0
| 0
| 348
| 0.158904
|
dd67590d08d500fd8ab7568abbfffa79b1097a7f
| 3,211
|
py
|
Python
|
Utils/Messaging.py
|
philshams/FC_analysis
|
cabe2385d5061d206a21b230605bfce9e39ec7f2
|
[
"MIT"
] | null | null | null |
Utils/Messaging.py
|
philshams/FC_analysis
|
cabe2385d5061d206a21b230605bfce9e39ec7f2
|
[
"MIT"
] | null | null | null |
Utils/Messaging.py
|
philshams/FC_analysis
|
cabe2385d5061d206a21b230605bfce9e39ec7f2
|
[
"MIT"
] | null | null | null |
from slackclient import SlackClient
import requests
import os
from Config import slack_env_var_token, slack_username
"""
These functions take care of sending slack messages and emails
"""
def slack_chat_messenger(message):
# NEVER LEAVE THE TOKEN IN YOUR CODE ON GITHUB, EVERYBODY WOULD HAVE ACCESS TO THE CHANNEL!
slack_token = os.environ.get(slack_env_var_token)
slack_client = SlackClient(slack_token)
api_call = slack_client.api_call("im.list")
user_slack_id = slack_username
    # You need to know the user's Slack id (user_slack_id) to open the direct-message channel
    if api_call.get('ok'):
        im_channel = None
        for im in api_call.get("ims"):
            if im.get("user") == user_slack_id:
                im_channel = im.get("id")
        # Only post if a direct-message channel was found for the configured user,
        # otherwise im_channel would be undefined and raise a NameError.
        if im_channel is not None:
            slack_client.api_call("chat.postMessage", channel=im_channel, text=message, as_user=False)
def slack_chat_attachments(filepath):
slack_chat_messenger('Trying to send you {}'.format(filepath))
slack_token = os.environ.get(slack_env_var_token)
my_file = {
'file': (filepath+'.png', open(filepath+'.png', 'rb'), 'image/png', {
'Expires': '0'
})
}
payload = {
"filename":filepath+'.png',
"token":slack_token,
"channels": ['@Fede'],
"media": my_file
}
r = requests.post("https://slack.com/api/files.upload", params=payload, files=my_file)
print(r.text)
def upload_file( filepath ):
"""Upload file to channel
Note:
URLs can be constructed from:
https://api.slack.com/methods/files.upload/test
"""
slack_chat_messenger('Trying to send you {}'.format(filepath))
slack_token = os.environ.get(slack_env_var_token)
data = {}
data['token'] = slack_token
data['file'] = filepath
data['filename'] = filepath
data['channels'] = [slack_username]
data['display_as_bot'] = True
filepath = data['file']
files = {
'content': (filepath, open(filepath, 'rb'), 'image/png', {
'Expires': '0'
})
}
data['media'] = files
response = requests.post(
url='https://slack.com/api/files.upload',
data=data,
headers={'Accept': 'application/json'},
files=files)
print(response.text)
def send_email_attachments(filename, filepath):
import smtplib
from email.mime.image import MIMEImage
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
# Create the container (outer) email message.
msg = MIMEMultipart()
msg['Subject'] = filename
    # Sender and recipient addresses (hard-coded Gmail accounts below).
msg['From'] = 'federicopython@gmail.com'
msg['To'] = 'federicoclaudi@gmail.com'
body = "Analysis results"
msg.attach(MIMEText(body, 'plain'))
with open(filepath+'.png', 'rb') as fp:
img = MIMEImage(fp.read())
msg.attach(img)
# Send the email via our own SMTP server.
server = smtplib.SMTP('smtp.gmail.com:587')
server.ehlo()
server.starttls()
server.login('federicopython@gmail.com', '')
server.sendmail('federicopython@gmail.com', 'federicoclaudi@gmail.com', msg.as_string())
server.quit()
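A typical call site for these helpers is the end of an analysis run: post a short status message, then upload the resulting figure. The snippet below is a hedged usage sketch rather than code from the repository; it assumes the Slack token is exported under the environment variable named by slack_env_var_token in Config, and the figure path is illustrative.

# Hedged usage sketch (not from the original repo). Requires the Slack token to be
# available as the environment variable named by slack_env_var_token in Config.
from Utils.Messaging import slack_chat_messenger, slack_chat_attachments

results_figure = "analysis_output"          # illustrative path; ".png" is appended by the helper
slack_chat_messenger("Tracking analysis finished, sending the summary figure.")
slack_chat_attachments(results_figure)      # uploads analysis_output.png to the configured channel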
| 28.669643
| 106
| 0.646528
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,135
| 0.353472
|
dd67b0be318886c07c1697a2ac8415ae63718592
| 71,403
|
py
|
Python
|
venv/lib/python3.8/site-packages/spaceone/api/monitoring/v1/alert_pb2.py
|
choonho/plugin-prometheus-mon-webhook
|
afa7d65d12715fd0480fb4f92a9c62da2d6128e0
|
[
"Apache-2.0"
] | null | null | null |
venv/lib/python3.8/site-packages/spaceone/api/monitoring/v1/alert_pb2.py
|
choonho/plugin-prometheus-mon-webhook
|
afa7d65d12715fd0480fb4f92a9c62da2d6128e0
|
[
"Apache-2.0"
] | null | null | null |
venv/lib/python3.8/site-packages/spaceone/api/monitoring/v1/alert_pb2.py
|
choonho/plugin-prometheus-mon-webhook
|
afa7d65d12715fd0480fb4f92a9c62da2d6128e0
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: spaceone/api/monitoring/v1/alert.proto
"""Generated protocol buffer code."""
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from spaceone.api.core.v1 import query_pb2 as spaceone_dot_api_dot_core_dot_v1_dot_query__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='spaceone/api/monitoring/v1/alert.proto',
package='spaceone.api.monitoring.v1',
syntax='proto3',
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n&spaceone/api/monitoring/v1/alert.proto\x12\x1aspaceone.api.monitoring.v1\x1a\x1bgoogle/protobuf/empty.proto\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1cgoogle/api/annotations.proto\x1a spaceone/api/core/v1/query.proto\"I\n\rAlertResource\x12\x13\n\x0bresource_id\x18\x01 \x01(\t\x12\x15\n\rresource_type\x18\x02 \x01(\t\x12\x0c\n\x04name\x18\x03 \x01(\t\"<\n\x0e\x41lertResponder\x12\x15\n\rresource_type\x18\x01 \x01(\t\x12\x13\n\x0bresource_id\x18\x02 \x01(\t\"\xac\x01\n\x12\x43reateAlertRequest\x12\r\n\x05title\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x10\n\x08\x61ssignee\x18\x03 \x01(\t\x12\x39\n\x07urgency\x18\x04 \x01(\x0e\x32(.spaceone.api.monitoring.v1.AlertUrgency\x12\x12\n\nproject_id\x18\x05 \x01(\t\x12\x11\n\tdomain_id\x18\x0b \x01(\t\"\xb6\x02\n\x12UpdateAlertRequest\x12\x10\n\x08\x61lert_id\x18\x01 \x01(\t\x12\r\n\x05title\x18\x02 \x01(\t\x12\r\n\x05state\x18\x03 \x01(\t\x12\x16\n\x0estatus_message\x18\x04 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x05 \x01(\t\x12\x10\n\x08\x61ssignee\x18\x06 \x01(\t\x12\x39\n\x07urgency\x18\x07 \x01(\x0e\x32(.spaceone.api.monitoring.v1.AlertUrgency\x12\x12\n\nproject_id\x18\x08 \x01(\t\x12\x1c\n\x14reset_status_message\x18\x0b \x01(\x08\x12\x19\n\x11reset_description\x18\x0c \x01(\x08\x12\x16\n\x0ereset_assignee\x18\r \x01(\x08\x12\x11\n\tdomain_id\x18\x15 \x01(\t\"N\n\x17UpdateAlertStateRequest\x12\x10\n\x08\x61lert_id\x18\x01 \x01(\t\x12\x12\n\naccess_key\x18\x02 \x01(\t\x12\r\n\x05state\x18\x03 \x01(\t\"H\n\x11MergeAlertRequest\x12\x0e\n\x06\x61lerts\x18\x01 \x03(\t\x12\x10\n\x08merge_to\x18\x02 \x01(\t\x12\x11\n\tdomain_id\x18\x03 \x01(\t\"K\n\x12SnoozeAlertRequest\x12\x10\n\x08\x61lert_id\x18\x01 \x01(\t\x12\x10\n\x08\x65nd_time\x18\x02 \x01(\t\x12\x11\n\tdomain_id\x18\x03 \x01(\t\"h\n\x15\x41lertResponderRequest\x12\x10\n\x08\x61lert_id\x18\x01 \x01(\t\x12\x15\n\rresource_type\x18\x02 \x01(\t\x12\x13\n\x0bresource_id\x18\x03 \x01(\t\x12\x11\n\tdomain_id\x18\x04 \x01(\t\"X\n\x1d\x41lertProjectDependencyRequest\x12\x10\n\x08\x61lert_id\x18\x01 \x01(\t\x12\x12\n\nproject_id\x18\x02 \x01(\t\x12\x11\n\tdomain_id\x18\x03 \x01(\t\"3\n\x0c\x41lertRequest\x12\x10\n\x08\x61lert_id\x18\x01 \x01(\t\x12\x11\n\tdomain_id\x18\x02 \x01(\t\"D\n\x0fGetAlertRequest\x12\x10\n\x08\x61lert_id\x18\x01 \x01(\t\x12\x11\n\tdomain_id\x18\x02 \x01(\t\x12\x0c\n\x04only\x18\x03 \x03(\t\"\x9d\x03\n\nAlertQuery\x12*\n\x05query\x18\x01 \x01(\x0b\x32\x1b.spaceone.api.core.v1.Query\x12\x14\n\x0c\x61lert_number\x18\x02 \x01(\x05\x12\x10\n\x08\x61lert_id\x18\x03 \x01(\t\x12\r\n\x05title\x18\x04 \x01(\t\x12\x35\n\x05state\x18\x05 \x01(\x0e\x32&.spaceone.api.monitoring.v1.AlertState\x12\x10\n\x08\x61ssignee\x18\x06 \x01(\t\x12\x39\n\x07urgency\x18\x07 \x01(\x0e\x32(.spaceone.api.monitoring.v1.AlertUrgency\x12\x10\n\x08severity\x18\x08 \x01(\t\x12\x12\n\nis_snoozed\x18\t \x01(\t\x12\x13\n\x0bresource_id\x18\n \x01(\t\x12\x14\n\x0ctriggered_by\x18\x0b \x01(\t\x12\x12\n\nwebhook_id\x18\x0c \x01(\t\x12\x1c\n\x14\x65scalation_policy_id\x18\r \x01(\t\x12\x12\n\nproject_id\x18\x0e \x01(\t\x12\x11\n\tdomain_id\x18\x0f \x01(\t\"\x9a\x06\n\tAlertInfo\x12\x14\n\x0c\x61lert_number\x18\x01 \x01(\x05\x12\x10\n\x08\x61lert_id\x18\x02 \x01(\t\x12\r\n\x05title\x18\x03 \x01(\t\x12\x35\n\x05state\x18\x04 \x01(\x0e\x32&.spaceone.api.monitoring.v1.AlertState\x12\x16\n\x0estatus_message\x18\x05 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x06 \x01(\t\x12\x10\n\x08\x61ssignee\x18\x07 \x01(\t\x12\x39\n\x07urgency\x18\x08 
\x01(\x0e\x32(.spaceone.api.monitoring.v1.AlertUrgency\x12\x10\n\x08severity\x18\t \x01(\t\x12\x0c\n\x04rule\x18\n \x01(\t\x12;\n\x08resource\x18\x0b \x01(\x0b\x32).spaceone.api.monitoring.v1.AlertResource\x12\x30\n\x0f\x61\x64\x64itional_info\x18\x0c \x01(\x0b\x32\x17.google.protobuf.Struct\x12\x12\n\nis_snoozed\x18\r \x01(\x08\x12\x18\n\x10snoozed_end_time\x18\x0e \x01(\t\x12\x17\n\x0f\x65scalation_step\x18\x0f \x01(\x05\x12\x16\n\x0e\x65scalation_ttl\x18\x10 \x01(\x05\x12>\n\nresponders\x18\x11 \x03(\x0b\x32*.spaceone.api.monitoring.v1.AlertResponder\x12\x1c\n\x14project_dependencies\x18\x12 \x03(\t\x12\x14\n\x0ctriggered_by\x18\x15 \x01(\t\x12\x12\n\nwebhook_id\x18\x16 \x01(\t\x12\x1c\n\x14\x65scalation_policy_id\x18\x17 \x01(\t\x12\x12\n\nproject_id\x18\x18 \x01(\t\x12\x11\n\tdomain_id\x18\x19 \x01(\t\x12\x12\n\ncreated_at\x18\x1f \x01(\t\x12\x12\n\nupdated_at\x18 \x01(\t\x12\x17\n\x0f\x61\x63knowledged_at\x18! \x01(\t\x12\x13\n\x0bresolved_at\x18\" \x01(\t\x12\x14\n\x0c\x65scalated_at\x18# \x01(\t\"Y\n\nAlertsInfo\x12\x36\n\x07results\x18\x01 \x03(\x0b\x32%.spaceone.api.monitoring.v1.AlertInfo\x12\x13\n\x0btotal_count\x18\x02 \x01(\x05\"Y\n\x0e\x41lertStatQuery\x12\x34\n\x05query\x18\x01 \x01(\x0b\x32%.spaceone.api.core.v1.StatisticsQuery\x12\x11\n\tdomain_id\x18\x02 \x01(\t*9\n\x0c\x41lertUrgency\x12\x16\n\x12\x41LERT_URGENCY_NONE\x10\x00\x12\x08\n\x04HIGH\x10\x01\x12\x07\n\x03LOW\x10\x02*\\\n\nAlertState\x12\x14\n\x10\x41LERT_STATE_NONE\x10\x00\x12\r\n\tTRIGGERED\x10\x01\x12\x10\n\x0c\x41\x43KNOWLEDGED\x10\x02\x12\x0c\n\x08RESOLVED\x10\x03\x12\t\n\x05\x45RROR\x10\x04\x32\x9e\x0f\n\x05\x41lert\x12~\n\x06\x63reate\x12..spaceone.api.monitoring.v1.CreateAlertRequest\x1a%.spaceone.api.monitoring.v1.AlertInfo\"\x1d\x82\xd3\xe4\x93\x02\x17\"\x15/monitoring/v1/alerts\x12\x88\x01\n\x06update\x12..spaceone.api.monitoring.v1.UpdateAlertRequest\x1a%.spaceone.api.monitoring.v1.AlertInfo\"\'\x82\xd3\xe4\x93\x02!\x1a\x1f/monitoring/v1/alert/{alert_id}\x12\xa8\x01\n\x0cupdate_state\x12\x33.spaceone.api.monitoring.v1.UpdateAlertStateRequest\x1a%.spaceone.api.monitoring.v1.AlertInfo\"<\x82\xd3\xe4\x93\x02\x36\"4/monitoring/v1/alert/{alert_id}/{access_key}/{state}\x12\x82\x01\n\x05merge\x12-.spaceone.api.monitoring.v1.MergeAlertRequest\x1a%.spaceone.api.monitoring.v1.AlertInfo\"#\x82\xd3\xe4\x93\x02\x1d\"\x1b/monitoring/v1/alerts/merge\x12\x8f\x01\n\x06snooze\x12..spaceone.api.monitoring.v1.SnoozeAlertRequest\x1a%.spaceone.api.monitoring.v1.AlertInfo\".\x82\xd3\xe4\x93\x02(\"&/monitoring/v1/alert/{alert_id}/snooze\x12\x9d\x01\n\radd_responder\x12\x31.spaceone.api.monitoring.v1.AlertResponderRequest\x1a%.spaceone.api.monitoring.v1.AlertInfo\"2\x82\xd3\xe4\x93\x02,\"*/monitoring/v1/alert/{alert_id}/responders\x12\xa0\x01\n\x10remove_responder\x12\x31.spaceone.api.monitoring.v1.AlertResponderRequest\x1a%.spaceone.api.monitoring.v1.AlertInfo\"2\x82\xd3\xe4\x93\x02,**/monitoring/v1/alert/{alert_id}/responders\x12\xb8\x01\n\x16\x61\x64\x64_project_dependency\x12\x39.spaceone.api.monitoring.v1.AlertProjectDependencyRequest\x1a%.spaceone.api.monitoring.v1.AlertInfo\"<\x82\xd3\xe4\x93\x02\x36\"4/monitoring/v1/alert/{alert_id}/project-dependencies\x12\xc6\x01\n\x19remove_project_dependency\x12\x39.spaceone.api.monitoring.v1.AlertProjectDependencyRequest\x1a%.spaceone.api.monitoring.v1.AlertInfo\"G\x82\xd3\xe4\x93\x02\x41*?/monitoring/v1/alert/{alert_id}/project-dependency/{project_id}\x12s\n\x06\x64\x65lete\x12(.spaceone.api.monitoring.v1.AlertRequest\x1a\x16.google.protobuf.Empty\"\'\x82\xd3\xe4\x93\x02!*\x1f/
monitoring/v1/alert/{alert_id}\x12\x82\x01\n\x03get\x12+.spaceone.api.monitoring.v1.GetAlertRequest\x1a%.spaceone.api.monitoring.v1.AlertInfo\"\'\x82\xd3\xe4\x93\x02!\x12\x1f/monitoring/v1/alert/{alert_id}\x12\x95\x01\n\x04list\x12&.spaceone.api.monitoring.v1.AlertQuery\x1a&.spaceone.api.monitoring.v1.AlertsInfo\"=\x82\xd3\xe4\x93\x02\x37\x12\x15/monitoring/v1/alertsZ\x1e\"\x1c/monitoring/v1/alerts/search\x12o\n\x04stat\x12*.spaceone.api.monitoring.v1.AlertStatQuery\x1a\x17.google.protobuf.Struct\"\"\x82\xd3\xe4\x93\x02\x1c\"\x1a/monitoring/v1/alerts/statb\x06proto3'
,
dependencies=[google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,google_dot_protobuf_dot_struct__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,spaceone_dot_api_dot_core_dot_v1_dot_query__pb2.DESCRIPTOR,])
_ALERTURGENCY = _descriptor.EnumDescriptor(
name='AlertUrgency',
full_name='spaceone.api.monitoring.v1.AlertUrgency',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='ALERT_URGENCY_NONE', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='HIGH', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='LOW', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=2763,
serialized_end=2820,
)
_sym_db.RegisterEnumDescriptor(_ALERTURGENCY)
AlertUrgency = enum_type_wrapper.EnumTypeWrapper(_ALERTURGENCY)
_ALERTSTATE = _descriptor.EnumDescriptor(
name='AlertState',
full_name='spaceone.api.monitoring.v1.AlertState',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='ALERT_STATE_NONE', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='TRIGGERED', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='ACKNOWLEDGED', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='RESOLVED', index=3, number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='ERROR', index=4, number=4,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=2822,
serialized_end=2914,
)
_sym_db.RegisterEnumDescriptor(_ALERTSTATE)
AlertState = enum_type_wrapper.EnumTypeWrapper(_ALERTSTATE)
ALERT_URGENCY_NONE = 0
HIGH = 1
LOW = 2
ALERT_STATE_NONE = 0
TRIGGERED = 1
ACKNOWLEDGED = 2
RESOLVED = 3
ERROR = 4
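# Usage sketch: the constants above (TRIGGERED, HIGH, ...) are module-level aliases for the
# AlertState/AlertUrgency enum values and can be passed straight into the message classes that
# the remainder of this generated module assembles from the descriptors below. This note is an
# illustrative addition, not protoc output; the AlertInfo field names it relies on
# (title, state, urgency) are taken from the AlertInfo descriptor defined further down.
#
#     from spaceone.api.monitoring.v1 import alert_pb2
#     alert = alert_pb2.AlertInfo(title='CPU usage high',
#                                 state=alert_pb2.TRIGGERED,
#                                 urgency=alert_pb2.HIGH)
#     payload = alert.SerializeToString()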
_ALERTRESOURCE = _descriptor.Descriptor(
name='AlertResource',
full_name='spaceone.api.monitoring.v1.AlertResource',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='resource_id', full_name='spaceone.api.monitoring.v1.AlertResource.resource_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='resource_type', full_name='spaceone.api.monitoring.v1.AlertResource.resource_type', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='spaceone.api.monitoring.v1.AlertResource.name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=193,
serialized_end=266,
)
_ALERTRESPONDER = _descriptor.Descriptor(
name='AlertResponder',
full_name='spaceone.api.monitoring.v1.AlertResponder',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='resource_type', full_name='spaceone.api.monitoring.v1.AlertResponder.resource_type', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='resource_id', full_name='spaceone.api.monitoring.v1.AlertResponder.resource_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=268,
serialized_end=328,
)
_CREATEALERTREQUEST = _descriptor.Descriptor(
name='CreateAlertRequest',
full_name='spaceone.api.monitoring.v1.CreateAlertRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='title', full_name='spaceone.api.monitoring.v1.CreateAlertRequest.title', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='description', full_name='spaceone.api.monitoring.v1.CreateAlertRequest.description', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='assignee', full_name='spaceone.api.monitoring.v1.CreateAlertRequest.assignee', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='urgency', full_name='spaceone.api.monitoring.v1.CreateAlertRequest.urgency', index=3,
number=4, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='project_id', full_name='spaceone.api.monitoring.v1.CreateAlertRequest.project_id', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='domain_id', full_name='spaceone.api.monitoring.v1.CreateAlertRequest.domain_id', index=5,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=331,
serialized_end=503,
)
_UPDATEALERTREQUEST = _descriptor.Descriptor(
name='UpdateAlertRequest',
full_name='spaceone.api.monitoring.v1.UpdateAlertRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='alert_id', full_name='spaceone.api.monitoring.v1.UpdateAlertRequest.alert_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='title', full_name='spaceone.api.monitoring.v1.UpdateAlertRequest.title', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='state', full_name='spaceone.api.monitoring.v1.UpdateAlertRequest.state', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='status_message', full_name='spaceone.api.monitoring.v1.UpdateAlertRequest.status_message', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='description', full_name='spaceone.api.monitoring.v1.UpdateAlertRequest.description', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='assignee', full_name='spaceone.api.monitoring.v1.UpdateAlertRequest.assignee', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='urgency', full_name='spaceone.api.monitoring.v1.UpdateAlertRequest.urgency', index=6,
number=7, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='project_id', full_name='spaceone.api.monitoring.v1.UpdateAlertRequest.project_id', index=7,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='reset_status_message', full_name='spaceone.api.monitoring.v1.UpdateAlertRequest.reset_status_message', index=8,
number=11, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='reset_description', full_name='spaceone.api.monitoring.v1.UpdateAlertRequest.reset_description', index=9,
number=12, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='reset_assignee', full_name='spaceone.api.monitoring.v1.UpdateAlertRequest.reset_assignee', index=10,
number=13, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='domain_id', full_name='spaceone.api.monitoring.v1.UpdateAlertRequest.domain_id', index=11,
number=21, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=506,
serialized_end=816,
)
_UPDATEALERTSTATEREQUEST = _descriptor.Descriptor(
name='UpdateAlertStateRequest',
full_name='spaceone.api.monitoring.v1.UpdateAlertStateRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='alert_id', full_name='spaceone.api.monitoring.v1.UpdateAlertStateRequest.alert_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='access_key', full_name='spaceone.api.monitoring.v1.UpdateAlertStateRequest.access_key', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='state', full_name='spaceone.api.monitoring.v1.UpdateAlertStateRequest.state', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=818,
serialized_end=896,
)
_MERGEALERTREQUEST = _descriptor.Descriptor(
name='MergeAlertRequest',
full_name='spaceone.api.monitoring.v1.MergeAlertRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='alerts', full_name='spaceone.api.monitoring.v1.MergeAlertRequest.alerts', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='merge_to', full_name='spaceone.api.monitoring.v1.MergeAlertRequest.merge_to', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='domain_id', full_name='spaceone.api.monitoring.v1.MergeAlertRequest.domain_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=898,
serialized_end=970,
)
_SNOOZEALERTREQUEST = _descriptor.Descriptor(
name='SnoozeAlertRequest',
full_name='spaceone.api.monitoring.v1.SnoozeAlertRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='alert_id', full_name='spaceone.api.monitoring.v1.SnoozeAlertRequest.alert_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='end_time', full_name='spaceone.api.monitoring.v1.SnoozeAlertRequest.end_time', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='domain_id', full_name='spaceone.api.monitoring.v1.SnoozeAlertRequest.domain_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=972,
serialized_end=1047,
)
_ALERTRESPONDERREQUEST = _descriptor.Descriptor(
name='AlertResponderRequest',
full_name='spaceone.api.monitoring.v1.AlertResponderRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='alert_id', full_name='spaceone.api.monitoring.v1.AlertResponderRequest.alert_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='resource_type', full_name='spaceone.api.monitoring.v1.AlertResponderRequest.resource_type', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='resource_id', full_name='spaceone.api.monitoring.v1.AlertResponderRequest.resource_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='domain_id', full_name='spaceone.api.monitoring.v1.AlertResponderRequest.domain_id', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1049,
serialized_end=1153,
)
_ALERTPROJECTDEPENDENCYREQUEST = _descriptor.Descriptor(
name='AlertProjectDependencyRequest',
full_name='spaceone.api.monitoring.v1.AlertProjectDependencyRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='alert_id', full_name='spaceone.api.monitoring.v1.AlertProjectDependencyRequest.alert_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='project_id', full_name='spaceone.api.monitoring.v1.AlertProjectDependencyRequest.project_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='domain_id', full_name='spaceone.api.monitoring.v1.AlertProjectDependencyRequest.domain_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1155,
serialized_end=1243,
)
_ALERTREQUEST = _descriptor.Descriptor(
name='AlertRequest',
full_name='spaceone.api.monitoring.v1.AlertRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='alert_id', full_name='spaceone.api.monitoring.v1.AlertRequest.alert_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='domain_id', full_name='spaceone.api.monitoring.v1.AlertRequest.domain_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1245,
serialized_end=1296,
)
_GETALERTREQUEST = _descriptor.Descriptor(
name='GetAlertRequest',
full_name='spaceone.api.monitoring.v1.GetAlertRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='alert_id', full_name='spaceone.api.monitoring.v1.GetAlertRequest.alert_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='domain_id', full_name='spaceone.api.monitoring.v1.GetAlertRequest.domain_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='only', full_name='spaceone.api.monitoring.v1.GetAlertRequest.only', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1298,
serialized_end=1366,
)
_ALERTQUERY = _descriptor.Descriptor(
name='AlertQuery',
full_name='spaceone.api.monitoring.v1.AlertQuery',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='query', full_name='spaceone.api.monitoring.v1.AlertQuery.query', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='alert_number', full_name='spaceone.api.monitoring.v1.AlertQuery.alert_number', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='alert_id', full_name='spaceone.api.monitoring.v1.AlertQuery.alert_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='title', full_name='spaceone.api.monitoring.v1.AlertQuery.title', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='state', full_name='spaceone.api.monitoring.v1.AlertQuery.state', index=4,
number=5, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='assignee', full_name='spaceone.api.monitoring.v1.AlertQuery.assignee', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='urgency', full_name='spaceone.api.monitoring.v1.AlertQuery.urgency', index=6,
number=7, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='severity', full_name='spaceone.api.monitoring.v1.AlertQuery.severity', index=7,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='is_snoozed', full_name='spaceone.api.monitoring.v1.AlertQuery.is_snoozed', index=8,
number=9, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='resource_id', full_name='spaceone.api.monitoring.v1.AlertQuery.resource_id', index=9,
number=10, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='triggered_by', full_name='spaceone.api.monitoring.v1.AlertQuery.triggered_by', index=10,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='webhook_id', full_name='spaceone.api.monitoring.v1.AlertQuery.webhook_id', index=11,
number=12, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='escalation_policy_id', full_name='spaceone.api.monitoring.v1.AlertQuery.escalation_policy_id', index=12,
number=13, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='project_id', full_name='spaceone.api.monitoring.v1.AlertQuery.project_id', index=13,
number=14, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='domain_id', full_name='spaceone.api.monitoring.v1.AlertQuery.domain_id', index=14,
number=15, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1369,
serialized_end=1782,
)
_ALERTINFO = _descriptor.Descriptor(
name='AlertInfo',
full_name='spaceone.api.monitoring.v1.AlertInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='alert_number', full_name='spaceone.api.monitoring.v1.AlertInfo.alert_number', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='alert_id', full_name='spaceone.api.monitoring.v1.AlertInfo.alert_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='title', full_name='spaceone.api.monitoring.v1.AlertInfo.title', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='state', full_name='spaceone.api.monitoring.v1.AlertInfo.state', index=3,
number=4, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='status_message', full_name='spaceone.api.monitoring.v1.AlertInfo.status_message', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='description', full_name='spaceone.api.monitoring.v1.AlertInfo.description', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='assignee', full_name='spaceone.api.monitoring.v1.AlertInfo.assignee', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='urgency', full_name='spaceone.api.monitoring.v1.AlertInfo.urgency', index=7,
number=8, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='severity', full_name='spaceone.api.monitoring.v1.AlertInfo.severity', index=8,
number=9, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='rule', full_name='spaceone.api.monitoring.v1.AlertInfo.rule', index=9,
number=10, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='resource', full_name='spaceone.api.monitoring.v1.AlertInfo.resource', index=10,
number=11, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='additional_info', full_name='spaceone.api.monitoring.v1.AlertInfo.additional_info', index=11,
number=12, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='is_snoozed', full_name='spaceone.api.monitoring.v1.AlertInfo.is_snoozed', index=12,
number=13, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='snoozed_end_time', full_name='spaceone.api.monitoring.v1.AlertInfo.snoozed_end_time', index=13,
number=14, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='escalation_step', full_name='spaceone.api.monitoring.v1.AlertInfo.escalation_step', index=14,
number=15, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='escalation_ttl', full_name='spaceone.api.monitoring.v1.AlertInfo.escalation_ttl', index=15,
number=16, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='responders', full_name='spaceone.api.monitoring.v1.AlertInfo.responders', index=16,
number=17, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='project_dependencies', full_name='spaceone.api.monitoring.v1.AlertInfo.project_dependencies', index=17,
number=18, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='triggered_by', full_name='spaceone.api.monitoring.v1.AlertInfo.triggered_by', index=18,
number=21, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='webhook_id', full_name='spaceone.api.monitoring.v1.AlertInfo.webhook_id', index=19,
number=22, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='escalation_policy_id', full_name='spaceone.api.monitoring.v1.AlertInfo.escalation_policy_id', index=20,
number=23, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='project_id', full_name='spaceone.api.monitoring.v1.AlertInfo.project_id', index=21,
number=24, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='domain_id', full_name='spaceone.api.monitoring.v1.AlertInfo.domain_id', index=22,
number=25, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='created_at', full_name='spaceone.api.monitoring.v1.AlertInfo.created_at', index=23,
number=31, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='updated_at', full_name='spaceone.api.monitoring.v1.AlertInfo.updated_at', index=24,
number=32, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='acknowledged_at', full_name='spaceone.api.monitoring.v1.AlertInfo.acknowledged_at', index=25,
number=33, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='resolved_at', full_name='spaceone.api.monitoring.v1.AlertInfo.resolved_at', index=26,
number=34, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='escalated_at', full_name='spaceone.api.monitoring.v1.AlertInfo.escalated_at', index=27,
number=35, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1785,
serialized_end=2579,
)
_ALERTSINFO = _descriptor.Descriptor(
name='AlertsInfo',
full_name='spaceone.api.monitoring.v1.AlertsInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='results', full_name='spaceone.api.monitoring.v1.AlertsInfo.results', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='total_count', full_name='spaceone.api.monitoring.v1.AlertsInfo.total_count', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2581,
serialized_end=2670,
)
_ALERTSTATQUERY = _descriptor.Descriptor(
name='AlertStatQuery',
full_name='spaceone.api.monitoring.v1.AlertStatQuery',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='query', full_name='spaceone.api.monitoring.v1.AlertStatQuery.query', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='domain_id', full_name='spaceone.api.monitoring.v1.AlertStatQuery.domain_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2672,
serialized_end=2761,
)
_CREATEALERTREQUEST.fields_by_name['urgency'].enum_type = _ALERTURGENCY
_UPDATEALERTREQUEST.fields_by_name['urgency'].enum_type = _ALERTURGENCY
_ALERTQUERY.fields_by_name['query'].message_type = spaceone_dot_api_dot_core_dot_v1_dot_query__pb2._QUERY
_ALERTQUERY.fields_by_name['state'].enum_type = _ALERTSTATE
_ALERTQUERY.fields_by_name['urgency'].enum_type = _ALERTURGENCY
_ALERTINFO.fields_by_name['state'].enum_type = _ALERTSTATE
_ALERTINFO.fields_by_name['urgency'].enum_type = _ALERTURGENCY
_ALERTINFO.fields_by_name['resource'].message_type = _ALERTRESOURCE
_ALERTINFO.fields_by_name['additional_info'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_ALERTINFO.fields_by_name['responders'].message_type = _ALERTRESPONDER
_ALERTSINFO.fields_by_name['results'].message_type = _ALERTINFO
_ALERTSTATQUERY.fields_by_name['query'].message_type = spaceone_dot_api_dot_core_dot_v1_dot_query__pb2._STATISTICSQUERY
DESCRIPTOR.message_types_by_name['AlertResource'] = _ALERTRESOURCE
DESCRIPTOR.message_types_by_name['AlertResponder'] = _ALERTRESPONDER
DESCRIPTOR.message_types_by_name['CreateAlertRequest'] = _CREATEALERTREQUEST
DESCRIPTOR.message_types_by_name['UpdateAlertRequest'] = _UPDATEALERTREQUEST
DESCRIPTOR.message_types_by_name['UpdateAlertStateRequest'] = _UPDATEALERTSTATEREQUEST
DESCRIPTOR.message_types_by_name['MergeAlertRequest'] = _MERGEALERTREQUEST
DESCRIPTOR.message_types_by_name['SnoozeAlertRequest'] = _SNOOZEALERTREQUEST
DESCRIPTOR.message_types_by_name['AlertResponderRequest'] = _ALERTRESPONDERREQUEST
DESCRIPTOR.message_types_by_name['AlertProjectDependencyRequest'] = _ALERTPROJECTDEPENDENCYREQUEST
DESCRIPTOR.message_types_by_name['AlertRequest'] = _ALERTREQUEST
DESCRIPTOR.message_types_by_name['GetAlertRequest'] = _GETALERTREQUEST
DESCRIPTOR.message_types_by_name['AlertQuery'] = _ALERTQUERY
DESCRIPTOR.message_types_by_name['AlertInfo'] = _ALERTINFO
DESCRIPTOR.message_types_by_name['AlertsInfo'] = _ALERTSINFO
DESCRIPTOR.message_types_by_name['AlertStatQuery'] = _ALERTSTATQUERY
DESCRIPTOR.enum_types_by_name['AlertUrgency'] = _ALERTURGENCY
DESCRIPTOR.enum_types_by_name['AlertState'] = _ALERTSTATE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
AlertResource = _reflection.GeneratedProtocolMessageType('AlertResource', (_message.Message,), {
'DESCRIPTOR' : _ALERTRESOURCE,
'__module__' : 'spaceone.api.monitoring.v1.alert_pb2'
# @@protoc_insertion_point(class_scope:spaceone.api.monitoring.v1.AlertResource)
})
_sym_db.RegisterMessage(AlertResource)
AlertResponder = _reflection.GeneratedProtocolMessageType('AlertResponder', (_message.Message,), {
'DESCRIPTOR' : _ALERTRESPONDER,
'__module__' : 'spaceone.api.monitoring.v1.alert_pb2'
# @@protoc_insertion_point(class_scope:spaceone.api.monitoring.v1.AlertResponder)
})
_sym_db.RegisterMessage(AlertResponder)
CreateAlertRequest = _reflection.GeneratedProtocolMessageType('CreateAlertRequest', (_message.Message,), {
'DESCRIPTOR' : _CREATEALERTREQUEST,
'__module__' : 'spaceone.api.monitoring.v1.alert_pb2'
# @@protoc_insertion_point(class_scope:spaceone.api.monitoring.v1.CreateAlertRequest)
})
_sym_db.RegisterMessage(CreateAlertRequest)
UpdateAlertRequest = _reflection.GeneratedProtocolMessageType('UpdateAlertRequest', (_message.Message,), {
'DESCRIPTOR' : _UPDATEALERTREQUEST,
'__module__' : 'spaceone.api.monitoring.v1.alert_pb2'
# @@protoc_insertion_point(class_scope:spaceone.api.monitoring.v1.UpdateAlertRequest)
})
_sym_db.RegisterMessage(UpdateAlertRequest)
UpdateAlertStateRequest = _reflection.GeneratedProtocolMessageType('UpdateAlertStateRequest', (_message.Message,), {
'DESCRIPTOR' : _UPDATEALERTSTATEREQUEST,
'__module__' : 'spaceone.api.monitoring.v1.alert_pb2'
# @@protoc_insertion_point(class_scope:spaceone.api.monitoring.v1.UpdateAlertStateRequest)
})
_sym_db.RegisterMessage(UpdateAlertStateRequest)
MergeAlertRequest = _reflection.GeneratedProtocolMessageType('MergeAlertRequest', (_message.Message,), {
'DESCRIPTOR' : _MERGEALERTREQUEST,
'__module__' : 'spaceone.api.monitoring.v1.alert_pb2'
# @@protoc_insertion_point(class_scope:spaceone.api.monitoring.v1.MergeAlertRequest)
})
_sym_db.RegisterMessage(MergeAlertRequest)
SnoozeAlertRequest = _reflection.GeneratedProtocolMessageType('SnoozeAlertRequest', (_message.Message,), {
'DESCRIPTOR' : _SNOOZEALERTREQUEST,
'__module__' : 'spaceone.api.monitoring.v1.alert_pb2'
# @@protoc_insertion_point(class_scope:spaceone.api.monitoring.v1.SnoozeAlertRequest)
})
_sym_db.RegisterMessage(SnoozeAlertRequest)
AlertResponderRequest = _reflection.GeneratedProtocolMessageType('AlertResponderRequest', (_message.Message,), {
'DESCRIPTOR' : _ALERTRESPONDERREQUEST,
'__module__' : 'spaceone.api.monitoring.v1.alert_pb2'
# @@protoc_insertion_point(class_scope:spaceone.api.monitoring.v1.AlertResponderRequest)
})
_sym_db.RegisterMessage(AlertResponderRequest)
AlertProjectDependencyRequest = _reflection.GeneratedProtocolMessageType('AlertProjectDependencyRequest', (_message.Message,), {
'DESCRIPTOR' : _ALERTPROJECTDEPENDENCYREQUEST,
'__module__' : 'spaceone.api.monitoring.v1.alert_pb2'
# @@protoc_insertion_point(class_scope:spaceone.api.monitoring.v1.AlertProjectDependencyRequest)
})
_sym_db.RegisterMessage(AlertProjectDependencyRequest)
AlertRequest = _reflection.GeneratedProtocolMessageType('AlertRequest', (_message.Message,), {
'DESCRIPTOR' : _ALERTREQUEST,
'__module__' : 'spaceone.api.monitoring.v1.alert_pb2'
# @@protoc_insertion_point(class_scope:spaceone.api.monitoring.v1.AlertRequest)
})
_sym_db.RegisterMessage(AlertRequest)
GetAlertRequest = _reflection.GeneratedProtocolMessageType('GetAlertRequest', (_message.Message,), {
'DESCRIPTOR' : _GETALERTREQUEST,
'__module__' : 'spaceone.api.monitoring.v1.alert_pb2'
# @@protoc_insertion_point(class_scope:spaceone.api.monitoring.v1.GetAlertRequest)
})
_sym_db.RegisterMessage(GetAlertRequest)
AlertQuery = _reflection.GeneratedProtocolMessageType('AlertQuery', (_message.Message,), {
'DESCRIPTOR' : _ALERTQUERY,
'__module__' : 'spaceone.api.monitoring.v1.alert_pb2'
# @@protoc_insertion_point(class_scope:spaceone.api.monitoring.v1.AlertQuery)
})
_sym_db.RegisterMessage(AlertQuery)
AlertInfo = _reflection.GeneratedProtocolMessageType('AlertInfo', (_message.Message,), {
'DESCRIPTOR' : _ALERTINFO,
'__module__' : 'spaceone.api.monitoring.v1.alert_pb2'
# @@protoc_insertion_point(class_scope:spaceone.api.monitoring.v1.AlertInfo)
})
_sym_db.RegisterMessage(AlertInfo)
AlertsInfo = _reflection.GeneratedProtocolMessageType('AlertsInfo', (_message.Message,), {
'DESCRIPTOR' : _ALERTSINFO,
'__module__' : 'spaceone.api.monitoring.v1.alert_pb2'
# @@protoc_insertion_point(class_scope:spaceone.api.monitoring.v1.AlertsInfo)
})
_sym_db.RegisterMessage(AlertsInfo)
AlertStatQuery = _reflection.GeneratedProtocolMessageType('AlertStatQuery', (_message.Message,), {
'DESCRIPTOR' : _ALERTSTATQUERY,
'__module__' : 'spaceone.api.monitoring.v1.alert_pb2'
# @@protoc_insertion_point(class_scope:spaceone.api.monitoring.v1.AlertStatQuery)
})
_sym_db.RegisterMessage(AlertStatQuery)
_ALERT = _descriptor.ServiceDescriptor(
name='Alert',
full_name='spaceone.api.monitoring.v1.Alert',
file=DESCRIPTOR,
index=0,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=2917,
serialized_end=4867,
methods=[
_descriptor.MethodDescriptor(
name='create',
full_name='spaceone.api.monitoring.v1.Alert.create',
index=0,
containing_service=None,
input_type=_CREATEALERTREQUEST,
output_type=_ALERTINFO,
serialized_options=b'\202\323\344\223\002\027\"\025/monitoring/v1/alerts',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='update',
full_name='spaceone.api.monitoring.v1.Alert.update',
index=1,
containing_service=None,
input_type=_UPDATEALERTREQUEST,
output_type=_ALERTINFO,
serialized_options=b'\202\323\344\223\002!\032\037/monitoring/v1/alert/{alert_id}',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='update_state',
full_name='spaceone.api.monitoring.v1.Alert.update_state',
index=2,
containing_service=None,
input_type=_UPDATEALERTSTATEREQUEST,
output_type=_ALERTINFO,
serialized_options=b'\202\323\344\223\0026\"4/monitoring/v1/alert/{alert_id}/{access_key}/{state}',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='merge',
full_name='spaceone.api.monitoring.v1.Alert.merge',
index=3,
containing_service=None,
input_type=_MERGEALERTREQUEST,
output_type=_ALERTINFO,
serialized_options=b'\202\323\344\223\002\035\"\033/monitoring/v1/alerts/merge',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='snooze',
full_name='spaceone.api.monitoring.v1.Alert.snooze',
index=4,
containing_service=None,
input_type=_SNOOZEALERTREQUEST,
output_type=_ALERTINFO,
serialized_options=b'\202\323\344\223\002(\"&/monitoring/v1/alert/{alert_id}/snooze',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='add_responder',
full_name='spaceone.api.monitoring.v1.Alert.add_responder',
index=5,
containing_service=None,
input_type=_ALERTRESPONDERREQUEST,
output_type=_ALERTINFO,
serialized_options=b'\202\323\344\223\002,\"*/monitoring/v1/alert/{alert_id}/responders',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='remove_responder',
full_name='spaceone.api.monitoring.v1.Alert.remove_responder',
index=6,
containing_service=None,
input_type=_ALERTRESPONDERREQUEST,
output_type=_ALERTINFO,
serialized_options=b'\202\323\344\223\002,**/monitoring/v1/alert/{alert_id}/responders',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='add_project_dependency',
full_name='spaceone.api.monitoring.v1.Alert.add_project_dependency',
index=7,
containing_service=None,
input_type=_ALERTPROJECTDEPENDENCYREQUEST,
output_type=_ALERTINFO,
serialized_options=b'\202\323\344\223\0026\"4/monitoring/v1/alert/{alert_id}/project-dependencies',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='remove_project_dependency',
full_name='spaceone.api.monitoring.v1.Alert.remove_project_dependency',
index=8,
containing_service=None,
input_type=_ALERTPROJECTDEPENDENCYREQUEST,
output_type=_ALERTINFO,
serialized_options=b'\202\323\344\223\002A*?/monitoring/v1/alert/{alert_id}/project-dependency/{project_id}',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='delete',
full_name='spaceone.api.monitoring.v1.Alert.delete',
index=9,
containing_service=None,
input_type=_ALERTREQUEST,
output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
serialized_options=b'\202\323\344\223\002!*\037/monitoring/v1/alert/{alert_id}',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='get',
full_name='spaceone.api.monitoring.v1.Alert.get',
index=10,
containing_service=None,
input_type=_GETALERTREQUEST,
output_type=_ALERTINFO,
serialized_options=b'\202\323\344\223\002!\022\037/monitoring/v1/alert/{alert_id}',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='list',
full_name='spaceone.api.monitoring.v1.Alert.list',
index=11,
containing_service=None,
input_type=_ALERTQUERY,
output_type=_ALERTSINFO,
serialized_options=b'\202\323\344\223\0027\022\025/monitoring/v1/alertsZ\036\"\034/monitoring/v1/alerts/search',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='stat',
full_name='spaceone.api.monitoring.v1.Alert.stat',
index=12,
containing_service=None,
input_type=_ALERTSTATQUERY,
output_type=google_dot_protobuf_dot_struct__pb2._STRUCT,
serialized_options=b'\202\323\344\223\002\034\"\032/monitoring/v1/alerts/stat',
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_ALERT)
DESCRIPTOR.services_by_name['Alert'] = _ALERT
# @@protoc_insertion_point(module_scope)
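# --- Illustrative usage sketch (editor's addition, not part of the generated file) ---
# The message classes registered above behave like any other protobuf messages;
# the field values below are hypothetical examples, not taken from the API spec.
#   from spaceone.api.monitoring.v1 import alert_pb2
#   info = alert_pb2.AlertInfo(project_id='project-123', domain_id='domain-abc')
#   payload = info.SerializeToString()
#   restored = alert_pb2.AlertInfo.FromString(payload)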
| 50.676366
| 7,646
| 0.759086
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 20,365
| 0.285212
|
dd67c81828221987d83cf924bc48aff8f98affa6
| 3,364
|
py
|
Python
|
fluid.py
|
fomightez/stable-fluids
|
a7bdbb0960c746022a1dfc216dbfe928ee98947b
|
[
"Unlicense"
] | 1
|
2020-04-20T12:14:59.000Z
|
2020-04-20T12:14:59.000Z
|
fluid.py
|
fomightez/stable-fluids
|
a7bdbb0960c746022a1dfc216dbfe928ee98947b
|
[
"Unlicense"
] | null | null | null |
fluid.py
|
fomightez/stable-fluids
|
a7bdbb0960c746022a1dfc216dbfe928ee98947b
|
[
"Unlicense"
] | null | null | null |
import numpy as np
import scipy.sparse as sp
from scipy.ndimage import map_coordinates
from scipy.sparse.linalg import factorized
import operators as ops
class Fluid:
def __init__(self, shape, viscosity, quantities):
self.shape = shape
# Defining these here keeps the code somewhat more readable vs. computing them every time they're needed.
self.size = np.product(shape)
self.dimensions = len(shape)
# Variable viscosity, both in time and in space, is easy to set up; but it conflicts with the use of
# SciPy's factorized function because the diffusion matrix must be recalculated every frame.
# In order to keep the simulation speedy I use fixed viscosity.
self.viscosity = viscosity
# By dynamically creating advected-diffused quantities as needed prototyping becomes much easier.
self.quantities = {}
for q in quantities:
self.quantities[q] = np.zeros(self.size)
self.velocity_field = np.zeros((self.size, self.dimensions))
# The reshaping here corresponds to a partial flattening so that self.indices
# has the same shape as self.velocity_field.
# This makes calculating the advection map as simple as a single vectorized subtraction each frame.
self.indices = np.dstack(np.indices(self.shape)).reshape(self.size, self.dimensions)
self.gradient = ops.matrices(shape, ops.differences(1, (1,) * self.dimensions), False)
# Both viscosity and pressure equations are just Poisson equations similar to the steady state heat equation.
laplacian = ops.matrices(shape, ops.differences(1, (2,) * self.dimensions), True)
self.pressure_solver = factorized(laplacian)
# Making sure I use the sparse version of the identity function here so I don't cast to a dense matrix.
self.viscosity_solver = factorized(sp.identity(self.size) - laplacian * viscosity)
def advect_diffuse(self):
# Advection is computed backwards in time as described in Jos Stam's Stable Fluids whitepaper.
advection_map = np.moveaxis(self.indices - self.velocity_field, -1, 0)
def kernel(field):
# Credit to Philip Zucker for pointing out the aptness of map_coordinates here.
# Initially I was using SciPy's griddata function.
# While both of these functions do essentially the same thing, griddata is much slower.
advected = map_coordinates(field.reshape(self.shape), advection_map, order=2).flatten()
return self.viscosity_solver(advected) if self.viscosity > 0 else advected
# Apply viscosity and advection to each axis of the velocity field and each user-defined quantity.
for d in range(self.dimensions):
self.velocity_field[..., d] = kernel(self.velocity_field[..., d])
for k, q in self.quantities.items():
self.quantities[k] = kernel(q)
def project(self):
# Pressure is calculated from divergence which is in turn calculated from the gradient of the velocity field.
divergence = sum(self.gradient[d].dot(self.velocity_field[..., d]) for d in range(self.dimensions))
pressure = self.pressure_solver(divergence)
for d in range(self.dimensions):
self.velocity_field[..., d] -= self.gradient[d].dot(pressure)
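# --- Illustrative usage sketch (editor's addition, not part of the original file) ---
# A minimal, hypothetical driver loop built only from the methods above; the grid
# shape, viscosity and 'dye' quantity name are assumptions for illustration.
#   fluid = Fluid((128, 128), viscosity=1e-4, quantities=['dye'])
#   fluid.quantities['dye'][64 * 128 + 64] = 1.0   # seed some dye near the centre
#   for _ in range(100):
#       fluid.advect_diffuse()   # backwards advection + implicit diffusion
#       fluid.project()          # subtract the pressure gradient to stay divergence-free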
| 51.753846
| 117
| 0.69352
| 3,206
| 0.953032
| 0
| 0
| 0
| 0
| 0
| 0
| 1,406
| 0.417955
|
dd69e272fd1cf6715ec8277d234fe3f1835d95b2
| 879
|
py
|
Python
|
setup.py
|
ngocjr7/geneticpython
|
4b4157523ce13b3da56cef61282cb0a984cd317b
|
[
"MIT"
] | null | null | null |
setup.py
|
ngocjr7/geneticpython
|
4b4157523ce13b3da56cef61282cb0a984cd317b
|
[
"MIT"
] | null | null | null |
setup.py
|
ngocjr7/geneticpython
|
4b4157523ce13b3da56cef61282cb0a984cd317b
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
with open("README.md", "r") as fh:
long_description = fh.read()
PROJECT_URLS = {
'Bug Tracker': 'https://github.com/ngocjr7/geneticpython/issues',
'Documentation': 'https://github.com/ngocjr7/geneticpython/blob/master/README.md',
'Source Code': 'https://github.com/ngocjr7/geneticpython'
}
with open('requirements.txt') as f:
install_requires = f.read().strip().split('\n')
setup(name='geneticpython',
description='A simple and friendly Python framework for genetic-based algorithms',
author='Ngoc Bui',
long_description=long_description,
long_description_content_type="text/markdown",
author_email='ngocjr7@gmail.com',
project_urls=PROJECT_URLS,
version='0.0.2',
packages=find_packages(),
install_requires=install_requires,
python_requires='>=3.6')
| 33.807692
| 88
| 0.703072
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 374
| 0.425484
|
dd6a3b230c4f4617de43080fbacd944fd76f81a8
| 182
|
py
|
Python
|
Script Python/Desafios/Desafio-012.py
|
Hubyratan/Material-Python
|
3f7910dd10b91cefe7b2f3f6aa49e9f9c522fd32
|
[
"MIT"
] | 1
|
2020-05-11T04:05:56.000Z
|
2020-05-11T04:05:56.000Z
|
Script Python/Desafios/Desafio-012.py
|
Hubyratan/Material-Python
|
3f7910dd10b91cefe7b2f3f6aa49e9f9c522fd32
|
[
"MIT"
] | null | null | null |
Script Python/Desafios/Desafio-012.py
|
Hubyratan/Material-Python
|
3f7910dd10b91cefe7b2f3f6aa49e9f9c522fd32
|
[
"MIT"
] | null | null | null |
v = float(input('Product price? R$'))
d = float(input('Discount percentage: '))
ds = (d*v)/100
p = v-ds
print(f'You got a discount of R${ds}, and will pay only R${p}')
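# Worked example (editor's note): for a price of R$100.00 and a 5% discount,
# ds = (5 * 100) / 100 = 5.0 and p = 100 - 5.0 = 95.0, so the script reports
# a discount of R$5.0 and a final price of R$95.0.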
| 36.4
| 71
| 0.659341
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 115
| 0.625
|
dd6c3adf785e5f57e8767ea1f8f416eff17606e6
| 208
|
py
|
Python
|
gavia/__init__.py
|
brett-hosking/gavia
|
d771235902b9a949efa92a1954d47dac8b1b527f
|
[
"MIT"
] | null | null | null |
gavia/__init__.py
|
brett-hosking/gavia
|
d771235902b9a949efa92a1954d47dac8b1b527f
|
[
"MIT"
] | null | null | null |
gavia/__init__.py
|
brett-hosking/gavia
|
d771235902b9a949efa92a1954d47dac8b1b527f
|
[
"MIT"
] | null | null | null |
# Package modules
from gavia import camera
from gavia import vizualise
from gavia import nav
from gavia import gps
from gavia import log
from gavia import time
# For nested packages
from gavia import image
| 20.8
| 27
| 0.8125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 39
| 0.1875
|
dd6dcaf577bcfeb980245ce8c8087ba55da11a51
| 1,241
|
py
|
Python
|
pynetester/pynetester/netester.py
|
zoumingzhe/pynetester
|
256776491b65d4b5f474754e02537f28a5961888
|
[
"MIT"
] | null | null | null |
pynetester/pynetester/netester.py
|
zoumingzhe/pynetester
|
256776491b65d4b5f474754e02537f28a5961888
|
[
"MIT"
] | null | null | null |
pynetester/pynetester/netester.py
|
zoumingzhe/pynetester
|
256776491b65d4b5f474754e02537f28a5961888
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# coding=utf-8
# ----------------------------------------------------------------------------------------------------
import socket
import os
# ----------------------------------------------------------------------------------------------------
# Class: netester
# ----------------------------------------------------------------------------------------------------
# Change history:
# 2021-05-17 | Zou Mingzhe | Ver0.1 | Initial version
# ----------------------------------------------------------------------------------------------------
# MAP:
# Not yet implemented | server(self, ...) | compress files
# ----------------------------------------------------------------------------------------------------
class netester:
"""
    The netester class operates on network IO.
"""
# ----------------------------------------------------------------------------------------------------
@staticmethod
def client(config = {'connect':[{'type':'TCP','host':'127.0.0.1','port':10517}]}):
for item in config['connect']:
info = netester.encode(item)
s = socket.socket(socket.AF_INET, info['type'])
s.connect(info['addr'])
# ----------------------------------------------------------------------------------------------------
| 45.962963
| 102
| 0.2361
| 450
| 0.350195
| 0
| 0
| 276
| 0.214786
| 0
| 0
| 1,009
| 0.785214
|
dd6f59dfe10afced9799503fae00bbea231ed3f2
| 7,121
|
py
|
Python
|
cmepy/solver.py
|
hegland/cmepy
|
fa8cdf2fad779badbcb629bf6ee33316724ec4a4
|
[
"BSD-3-Clause"
] | 6
|
2015-05-21T04:56:26.000Z
|
2019-12-11T18:23:58.000Z
|
cmepy/solver.py
|
fcostin/cmepy
|
fa8cdf2fad779badbcb629bf6ee33316724ec4a4
|
[
"BSD-3-Clause"
] | 1
|
2018-11-28T15:45:39.000Z
|
2019-09-18T23:25:40.000Z
|
cmepy/solver.py
|
hegland/cmepy
|
fa8cdf2fad779badbcb629bf6ee33316724ec4a4
|
[
"BSD-3-Clause"
] | 3
|
2017-05-13T06:56:08.000Z
|
2021-09-01T14:20:52.000Z
|
"""
Creates solvers for the Chemical Master Equation (CME).
"""
import numpy
from cmepy import cme_matrix, domain, ode_solver, other_solver, state_enum
from cmepy import model as mdl
def create_packing_functions(domain_enum):
"""
create_packing_functions(domain_enum) -> (pack, unpack)
where
pack((p, p_sink)) -> y
unpack(y) -> (p, p_sink)
"""
def pack((p, p_sink)):
"""
pack((p, p_sink)) -> y
where
p : mapping from states to probability
p_sink : float, storing probability lost from domain due to
truncation of domain states
y : array passed to differential equations solver
"""
d_dense = domain_enum.pack_distribution(p)
return numpy.concatenate((d_dense, [p_sink]))
def unpack(y):
"""
unpack(y) -> (p, p_sink)
where
p : mapping from states to probability
p_sink : float, storing probability lost from domain due to
truncation of domain states
y : array passed to differential equations solver
"""
p_sparse = domain_enum.unpack_distribution(y[:-1])
p_sink = y[-1]
return p_sparse, p_sink
return (pack, unpack)
def create(model,
sink,
p_0=None,
t_0=None,
sink_0=None,
time_dependencies=None,
domain_states=None,
solver=ode_solver.Solver,
outflow=False,
**solver_args):
"""
Returns a solver for the Chemical Master Equation of the given model.
arguments:
model : the CME model to solve
sink : If sink is True, the solver will include a 'sink' state used
to accumulate any probability that may flow outside the domain.
This can be used to measure the error in the solution due to
truncation of the domain. If sink is False, the solver will not
include a 'sink' state, and probability will be artificially
prevented from flowing outside of the domain.
p_0 : (optional) mapping from states in the domain to probabilities,
for the initial probability distribution. If not specified,
and the initial state of the state space is given by the model,
defaults to all probability concentrated at the initial state,
otherwise, a ValueError will be raised.
t_0 : (optional) initial time, defaults to 0.0
sink_0 : (optional) initial sink probability, defaults to 0.0
Only a valid argument if sink is set to True.
time_dependencies : (optional) By default, reaction propensities are
time independent. If specified, time_dependencies must be of the
form { s_1 : phi_1, ..., s_n : phi_n }, where each (s_j, phi_j)
        item satisfies:
s_j : set of reaction indices
phi_j : phi_j(t) -> time dependent coefficient
        The propensities of the reactions with indices contained in s_j
will all be multiplied by the coefficient phi_j(t), at time t.
Reactions are indexed according to the ordering of the propensities
in the model.
The reaction index sets s_j must be *disjoint*. It is not necessary
for the union of the s_j to include all the reaction indices.
If a reaction's index is not contained in any s_j then the reaction
is treated as time-independent.
        In other words, time_dependencies is a mapping of time dependent
        coefficient functions keyed by subsets of reaction indices, with
        respect to the ordering of reactions determined by the order of the
        propensity functions inside the model. The propensities of the
        reactions with indices included in each subset are multiplied by the
        time dependent coefficient function. By default, no time dependent
        coefficient functions are specified, that is, the CME has
        time-independent propensities.
domain_states : (optional) array of states in the domain.
By default, generate the rectangular lattice of states defined by
the 'shape' entry of the model. A ValueError is raised if both
domain_states and 'shape' are unspecified.
"""
mdl.validate_model(model)
if sink and outflow:
raise ValueError('sink and outflow cannot be both True')
if sink_0 is not None:
if not sink:
raise ValueError('sink_0 may not be specified if sink is False')
sink_0 = float(sink_0)
else:
sink_0 = 0.0
# determine states in domain, then construct an enumeration of the
# domain states
if domain_states is None:
if mdl.SHAPE not in model:
lament = 'if no states given, model must contain key \'%s\''
raise KeyError(lament % mdl.SHAPE)
else:
domain_states = domain.from_rect(shape = model.shape)
domain_enum = state_enum.create(domain_states)
# determine p_0, then construct a dense representation with respect to
# the domain enumeration
initial_state = model.get(mdl.INITIAL_STATE, None)
if p_0 is None:
if initial_state is None:
lament = 'if no p_0 given, model must contain key \'%s\''
raise ValueError(lament % mdl.INITIAL_STATE)
else:
p_0 = {initial_state : 1.0}
if t_0 is None:
t_0 = 0.0
member_flags = domain_enum.contains(domain.from_iter(p_0))
if not numpy.logical_and.reduce(member_flags):
raise ValueError('support of p_0 is not a subset of domain_states')
# compute reaction matrices and use them to define differential equations
gen_matrices = cme_matrix.gen_reaction_matrices(
model,
domain_enum,
sink,
cme_matrix.non_neg_states,
outflow=outflow
)
reaction_matrices = list(gen_matrices)
dy_dt = cme_matrix.create_diff_eqs(
reaction_matrices,
phi = time_dependencies
)
if solver_args:
solver_args['reaction_matrices'] = reaction_matrices
# construct and initialise solver
if sink:
cme_solver = solver(
dy_dt,
y_0 = (p_0, sink_0),
t_0 = t_0,
**solver_args
)
pack, unpack = create_packing_functions(domain_enum)
cme_solver.set_packing(
pack,
unpack,
transform_dy_dt = False
)
else:
pack = domain_enum.pack_distribution
unpack = domain_enum.unpack_distribution
cme_solver = solver(
dy_dt,
y_0 = p_0,
t_0 = t_0,
**solver_args
)
cme_solver.set_packing(
pack,
unpack,
transform_dy_dt = False
)
return cme_solver
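# --- Illustrative sketch (editor's addition, not part of cmepy) ---
# A hedged example of the time_dependencies format documented above; the model,
# reaction indices and coefficient functions are hypothetical.
#   time_dependencies = {
#       frozenset([0]): lambda t: 1.0 + 0.5 * numpy.sin(t),   # modulate reaction 0
#       frozenset([2, 3]): lambda t: numpy.exp(-0.1 * t),     # decay reactions 2 and 3
#   }
#   cme_solver = create(model, sink=True, time_dependencies=time_dependencies)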
| 35.078818
| 79
| 0.602303
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4,303
| 0.604269
|
dd767e6d50fc90c7d830096cddd6903575b2142e
| 1,290
|
py
|
Python
|
server_common/helpers.py
|
GustavLero/EPICS-inst_servers
|
4bcdd6a80f1d9e074de3f0f7c66968d506981988
|
[
"BSD-3-Clause"
] | null | null | null |
server_common/helpers.py
|
GustavLero/EPICS-inst_servers
|
4bcdd6a80f1d9e074de3f0f7c66968d506981988
|
[
"BSD-3-Clause"
] | null | null | null |
server_common/helpers.py
|
GustavLero/EPICS-inst_servers
|
4bcdd6a80f1d9e074de3f0f7c66968d506981988
|
[
"BSD-3-Clause"
] | null | null | null |
import json
import os
import sys
from server_common.ioc_data_source import IocDataSource
from server_common.mysql_abstraction_layer import SQLAbstraction
from server_common.utilities import print_and_log, SEVERITY
def register_ioc_start(ioc_name, pv_database=None, prefix=None):
"""
A helper function to register the start of an ioc.
Args:
ioc_name: name of the ioc to start
        pv_database: dictionary of PVs in the IOC
prefix: prefix of pvs in this ioc
"""
try:
exepath = sys.argv[0]
if pv_database is None:
pv_database = {}
if prefix is None:
prefix = "none"
ioc_data_source = IocDataSource(SQLAbstraction("iocdb", "iocdb", "$iocdb"))
ioc_data_source.insert_ioc_start(ioc_name, os.getpid(), exepath, pv_database, prefix)
except Exception as e:
print_and_log("Error registering ioc start: {}: {}".format(e.__class__.__name__, e), SEVERITY.MAJOR)
def get_macro_values():
"""
Parse macro environment JSON into dict. To make this work use the icpconfigGetMacros program.
Returns: Macro Key:Value pairs as dict
"""
macros = json.loads(os.environ.get("REFL_MACROS", ""))
macros = {key: value for (key, value) in macros.items()}
return macros
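# Illustrative sketch (editor's addition): assuming icpconfigGetMacros had set the
# hypothetical environment value
#   REFL_MACROS='{"SIMULATE": "1", "IFIOC": "NDW1234"}'
# get_macro_values() would return {'SIMULATE': '1', 'IFIOC': 'NDW1234'}.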
| 32.25
| 108
| 0.686047
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 444
| 0.344186
|
dd782114838d338a027967eb958ee0dd0d6070b0
| 12,799
|
py
|
Python
|
rman_ui/rman_ui_txmanager.py
|
ian-hsieh/RenderManForBlender
|
c827f029f4cbbd1fcc71ed8d3694fc5ac58cc468
|
[
"MIT"
] | 12
|
2019-05-03T21:58:15.000Z
|
2022-02-24T07:02:21.000Z
|
rman_ui/rman_ui_txmanager.py
|
ian-hsieh/RenderManForBlender
|
c827f029f4cbbd1fcc71ed8d3694fc5ac58cc468
|
[
"MIT"
] | 4
|
2019-03-07T18:20:16.000Z
|
2020-09-24T21:53:15.000Z
|
rman_ui/rman_ui_txmanager.py
|
ian-hsieh/RenderManForBlender
|
c827f029f4cbbd1fcc71ed8d3694fc5ac58cc468
|
[
"MIT"
] | 3
|
2019-05-25T01:17:09.000Z
|
2019-09-13T14:43:12.000Z
|
import bpy
from bpy.props import StringProperty, IntProperty, CollectionProperty, EnumProperty, BoolProperty
from bpy.types import PropertyGroup, UIList, Operator, Panel
from bpy_extras.io_utils import ImportHelper
from .rman_ui_base import _RManPanelHeader
from ..txmanager3 import txparams
from ..rman_utils import texture_utils
from .. import txmanager3 as txmngr3
import os
import uuid
class TxFileItem(PropertyGroup):
"""UIList item representing a TxFile"""
name: StringProperty(
name="Name",
description="Image name",
default="")
tooltip: StringProperty(
name="tooltip",
description="Tool Tip",
default="")
nodeID: StringProperty(
name="nodeID",
description="Node ID (hidden)",
default="")
state: IntProperty(
name="state",
description="",
default=0
)
enable: BoolProperty(
name="enable",
description="Enable or disable this TxFileItem",
default=True
)
txsettings = ['texture_type',
'smode',
'tmode',
'texture_format',
'data_type',
'resize']
items = []
for item in txparams.TX_TYPES:
items.append((item, item, ''))
texture_type: EnumProperty(
name="Texture Type",
items=items,
description="Texture Type",
default=txparams.TX_TYPE_REGULAR)
items = []
for item in txparams.TX_WRAP_MODES:
items.append((item, item, ''))
smode: EnumProperty(
name="S Wrap",
items=items,
default=txparams.TX_WRAP_MODE_PERIODIC)
tmode: EnumProperty(
name="T Wrap",
items=items,
default=txparams.TX_WRAP_MODE_PERIODIC)
items = []
for item in txparams.TX_FORMATS:
items.append((item, item, ''))
texture_format: EnumProperty(
name="Format",
default=txparams.TX_FORMAT_PIXAR,
items=items,
description="Texture format")
items = []
items.append(('default', 'default', ''))
for item in txparams.TX_DATATYPES:
items.append((item, item, ''))
data_type: EnumProperty(
name="Data Type",
default=txparams.TX_DATATYPE_FLOAT,
items=items,
description="The data storage txmake uses")
items = []
for item in txparams.TX_RESIZES:
items.append((item, item, ''))
resize: EnumProperty(
name="Resize",
default=txparams.TX_RESIZE_UP_DASH,
items=items,
description="The type of resizing flag to pass to txmake")
class PRMAN_UL_Renderman_txmanager_list(UIList):
"""RenderMan TxManager UIList."""
def draw_item(self, context, layout, data, item, icon, active_data,
active_propname, index):
icons_map = {txmngr3.STATE_MISSING: 'ERROR',
txmngr3.STATE_EXISTS: 'CHECKBOX_HLT',
txmngr3.STATE_IS_TEX: 'TEXTURE',
txmngr3.STATE_IN_QUEUE: 'PLUS',
txmngr3.STATE_PROCESSING: 'TIME',
txmngr3.STATE_ERROR: 'CANCEL',
txmngr3.STATE_REPROCESS: 'TIME',
txmngr3.STATE_UNKNOWN: 'CANCEL',
txmngr3.STATE_INPUT_MISSING: 'ERROR'}
txfile = None
if item.nodeID != "":
txfile = texture_utils.get_txmanager().txmanager.get_txfile_from_id(item.nodeID)
else:
txfile = texture_utils.get_txmanager().txmanager.get_txfile_from_path(item.name)
if txfile:
custom_icon = icons_map[txfile.state]
else:
custom_icon = 'CANCEL'
if self.layout_type in {'DEFAULT', 'COMPACT'}:
layout.label(text=item.name, icon = custom_icon)
elif self.layout_type in {'GRID'}:
layout.alignment = 'CENTER'
layout.label(text="", icon = custom_icon)
class PRMAN_OT_Renderman_txmanager_parse_scene(Operator):
"""Parse scene for textures to add to to the txmanager"""
bl_idname = "rman_txmgr_list.parse_scene"
bl_label = "Parse Scene"
def execute(self, context):
rman_txmgr_list = context.scene.rman_txmgr_list
rman_txmgr_list.clear()
texture_utils.get_txmanager().txmanager.reset()
texture_utils.parse_for_textures(context.scene)
texture_utils.get_txmanager().txmake_all(blocking=False)
return{'FINISHED'}
class PRMAN_OT_Renderman_txmanager_pick_images(Operator, ImportHelper):
"""Pick images from a directory."""
bl_idname = "rman_txmgr_list.pick_images"
bl_label = "Pick Images"
filename: StringProperty(maxlen=1024)
directory: StringProperty(maxlen=1024)
files: CollectionProperty(type=bpy.types.PropertyGroup)
def execute(self, context):
rman_txmgr_list = context.scene.rman_txmgr_list
rman_txmgr_list.clear()
texture_utils.get_txmanager().txmanager.reset()
if len(self.files) > 0:
for f in self.files:
img = os.path.join(self.directory, f.name)
item = context.scene.rman_txmgr_list.add()
item.nodeID = str(uuid.uuid1())
texture_utils.get_txmanager().txmanager.add_texture(item.nodeID, img)
item.name = img
return{'FINISHED'}
class PRMAN_OT_Renderman_txmanager_clear_all_cache(Operator):
"""Clear RenderMan Texture cache"""
bl_idname = "rman_txmgr_list.clear_all_cache"
bl_label = "Clear Texture Cache"
def execute(self, context):
# needs to call InvalidateTexture
return{'FINISHED'}
class PRMAN_OT_Renderman_txmanager_reconvert_all(Operator):
"""Clear all .tex files and re-convert."""
bl_idname = "rman_txmgr_list.reconvert_all"
bl_label = "RE-Convert All"
def execute(self, context):
texture_utils.get_txmanager().txmanager.delete_texture_files()
texture_utils.get_txmanager().txmake_all(blocking=False)
return{'FINISHED'}
class PRMAN_OT_Renderman_txmanager_apply_preset(Operator):
"""Apply current settings to the selected texture."""
bl_idname = "rman_txmgr_list.apply_preset"
bl_label = "Apply preset"
def execute(self, context):
idx = context.scene.rman_txmgr_list_index
item = context.scene.rman_txmgr_list[idx]
txsettings = dict()
for attr in item.txsettings:
val = getattr(item, attr)
if attr == 'data_type' and val == 'default':
val = None
txsettings[attr] = val
if txsettings:
txfile = None
if item.nodeID != "":
txfile = texture_utils.get_txmanager().txmanager.get_txfile_from_id(item.nodeID)
else:
txfile = texture_utils.get_txmanager().txmanager.get_txfile_from_path(item.name)
txfile.params.set_params_from_dict(txsettings)
return{'FINISHED'}
class PRMAN_OT_Renderman_txmanager_add_texture(Operator):
"""Add texture."""
bl_idname = "rman_txmgr_list.add_texture"
bl_label = "add_texture"
filepath: StringProperty()
nodeID: StringProperty()
def execute(self, context):
txfile = texture_utils.get_txmanager().txmanager.get_txfile_from_path(self.filepath)
if not txfile:
return{'FINISHED'}
item = None
# check if nodeID already exists in the list
for i in context.scene.rman_txmgr_list:
if i.nodeID == self.nodeID:
item = i
break
if not item:
item = context.scene.rman_txmgr_list.add()
item.nodeID = self.nodeID
item.name = txfile.input_image
params = txfile.params
item.texture_type = params.texture_type
item.smode = params.smode
item.tmode = params.tmode
item.texture_type = params.texture_type
if params.data_type is not None:
item.data_type = params.data_type
item.resize = params.resize
item.state = txfile.state
if txfile.state == txmngr3.STATE_IS_TEX:
item.enable = False
item.tooltip = '\n' + txfile.tooltip_text()
# FIXME: should also add the nodes that this texture is referenced in
return{'FINISHED'}
class PRMAN_PT_Renderman_txmanager_list(_RManPanelHeader, Panel):
"""RenderMan Texture Manager Panel."""
bl_label = "RenderMan Texture Manager"
bl_idname = "PRMAN_PT_Renderman_txmanager_list"
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_context = "scene"
def draw(self, context):
layout = self.layout
scene = context.scene
row = layout.row()
row.operator('rman_txmgr_list.parse_scene', text='Parse Scene')
# FIXME: not totally working. The done callbacks fail
#row.operator('rman_txmgr_list.pick_images', text='Pick Images')
row.operator('rman_txmgr_list.reconvert_all', text='Reconvert')
row.operator('rman_txmgr_list.clear_all_cache', text='Clear All Cache')
if scene.rman_txmgr_list_index >= 0 and scene.rman_txmgr_list:
row = layout.row()
row.template_list("PRMAN_UL_Renderman_txmanager_list", "The_List", scene,
"rman_txmgr_list", scene, "rman_txmgr_list_index", item_dyntip_propname="tooltip")
item = scene.rman_txmgr_list[scene.rman_txmgr_list_index]
row = layout.row()
row.label(text='Texture Settings')
row = layout.row()
row.enabled = item.enable
row.prop(item, "texture_type")
row = layout.row()
row.enabled = item.enable
row.prop(item, "smode")
row.prop(item, "tmode")
row = layout.row()
row.enabled = item.enable
row.prop(item, "texture_format")
row = layout.row()
row.enabled = item.enable
row.prop(item, "data_type")
row = layout.row()
row.enabled = item.enable
row.prop(item, "resize")
row = layout.row()
row.enabled = item.enable
row.alignment = 'RIGHT'
row.operator('rman_txmgr_list.apply_preset', text='Apply')
row = layout.row()
row.alignment='CENTER'
in_list = len(context.scene.rman_txmgr_list)
progress = 'All Converted'
qsize = texture_utils.get_txmanager().txmanager.workQueue.qsize()
if qsize != 0:
progress = 'Converting...%d left to convert' % (qsize)
row.label(text=progress)
def index_updated(self, context):
'''
When the index updates, make sure the texture settings
are in sync with the txmanager.
'''
idx = context.scene.rman_txmgr_list_index
if idx < 0:
return
item = context.scene.rman_txmgr_list[idx]
txfile = None
if item.nodeID != "":
txfile = texture_utils.get_txmanager().txmanager.get_txfile_from_id(item.nodeID)
else:
txfile = texture_utils.get_txmanager().txmanager.get_txfile_from_path(item.name)
if txfile:
params = txfile.params
item.texture_type = params.texture_type
item.smode = params.smode
item.tmode = params.tmode
item.texture_type = params.texture_type
if params.data_type is not None:
item.data_type = params.data_type
item.resize = params.resize
if txfile.state == txmngr3.STATE_IS_TEX:
item.enable = False
classes = [
TxFileItem,
PRMAN_UL_Renderman_txmanager_list,
PRMAN_OT_Renderman_txmanager_parse_scene,
PRMAN_OT_Renderman_txmanager_pick_images,
PRMAN_OT_Renderman_txmanager_clear_all_cache,
PRMAN_OT_Renderman_txmanager_reconvert_all,
PRMAN_OT_Renderman_txmanager_apply_preset,
PRMAN_OT_Renderman_txmanager_add_texture,
PRMAN_PT_Renderman_txmanager_list
]
def register():
for cls in classes:
bpy.utils.register_class(cls)
bpy.types.Scene.rman_txmgr_list = CollectionProperty(type = TxFileItem)
bpy.types.Scene.rman_txmgr_list_index = IntProperty(name = "RenderMan Texture Manager",
default = 0, update=index_updated)
def unregister():
del bpy.types.Scene.rman_txmgr_list
del bpy.types.Scene.rman_txmgr_list_index
for cls in classes:
bpy.utils.unregister_class(cls)
| 32.734015
| 110
| 0.615595
| 10,539
| 0.823424
| 0
| 0
| 0
| 0
| 0
| 0
| 2,109
| 0.164778
|
dd788c7b5bde6a0a3088e641302680a262892fc0
| 943
|
py
|
Python
|
cousins-in-binary-tree/cousins-in-binary-tree.py
|
hyeseonko/LeetCode
|
48dfc93f1638e13041d8ce1420517a886abbdc77
|
[
"MIT"
] | 2
|
2021-12-05T14:29:06.000Z
|
2022-01-01T05:46:13.000Z
|
cousins-in-binary-tree/cousins-in-binary-tree.py
|
hyeseonko/LeetCode
|
48dfc93f1638e13041d8ce1420517a886abbdc77
|
[
"MIT"
] | null | null | null |
cousins-in-binary-tree/cousins-in-binary-tree.py
|
hyeseonko/LeetCode
|
48dfc93f1638e13041d8ce1420517a886abbdc77
|
[
"MIT"
] | null | null | null |
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def isCousins(self, root: Optional[TreeNode], x: int, y: int) -> bool:
        # conditions to be cousins: (1) different parents, (2) same depth
stack=[(root, 0, -1)]
xlevel, ylevel = -1, -1
xparent, yparent = -1, -1
while(stack):
cur, depth, parent = stack.pop(0)
if cur.val==x:
xlevel, xparent = depth, parent
if cur.val==y:
ylevel, yparent = depth, parent
if cur.left:
stack.append((cur.left, depth+1, cur.val))
if cur.right:
stack.append((cur.right, depth+1, cur.val))
if xlevel==ylevel and xparent!=yparent:
return True
else:
return False
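# Worked example (editor's addition): for the tree 1 -> (2, 3) with 2.right = 4 and
# 3.right = 5, isCousins(root, 4, 5) finds both values at depth 2 with different
# parents (2 and 3) and returns True, while isCousins(root, 2, 3) returns False
# because 2 and 3 share the parent 1.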
| 36.269231
| 74
| 0.520679
| 751
| 0.796394
| 0
| 0
| 0
| 0
| 0
| 0
| 243
| 0.257688
|
dd793146f92f3801277eeab8ef75d59d722ca146
| 5,581
|
py
|
Python
|
rdmo/questions/tests/test_validator_locked_questions.py
|
berkerY/rdmo
|
c0500f9b6caff9106a254a05e0d0e8018fc8db28
|
[
"Apache-2.0"
] | 77
|
2016-08-09T11:40:20.000Z
|
2022-03-06T11:03:26.000Z
|
rdmo/questions/tests/test_validator_locked_questions.py
|
MSpenger/rdmo
|
c0500f9b6caff9106a254a05e0d0e8018fc8db28
|
[
"Apache-2.0"
] | 377
|
2016-07-01T13:59:36.000Z
|
2022-03-30T13:53:19.000Z
|
rdmo/questions/tests/test_validator_locked_questions.py
|
MSpenger/rdmo
|
c0500f9b6caff9106a254a05e0d0e8018fc8db28
|
[
"Apache-2.0"
] | 47
|
2016-06-23T11:32:19.000Z
|
2022-03-01T11:34:37.000Z
|
import pytest
from django.core.exceptions import ValidationError
from rest_framework.exceptions import \
ValidationError as RestFameworkValidationError
from ..models import Question, QuestionSet
from ..serializers.v1 import QuestionSerializer
from ..validators import QuestionLockedValidator
def test_create(db):
QuestionLockedValidator()({
'questionset': QuestionSet.objects.first(),
'locked': False
})
def test_create_locked(db):
QuestionLockedValidator()({
'questionset': QuestionSet.objects.first(),
'locked': True
})
def test_update(db):
question = Question.objects.first()
QuestionLockedValidator(question)({
'questionset': question.questionset,
'locked': False
})
def test_update_error(db):
question = Question.objects.first()
question.locked = True
question.save()
with pytest.raises(ValidationError):
QuestionLockedValidator(question)({
'questionset': question.questionset,
'locked': True
})
def test_update_parent_error(db):
question = Question.objects.first()
question.questionset.locked = True
question.questionset.save()
with pytest.raises(ValidationError):
QuestionLockedValidator(question)({
'questionset': question.questionset,
'locked': False
})
def test_update_parent_parent_error(db):
question = Question.objects.first()
question.questionset.section.locked = True
question.questionset.section.save()
with pytest.raises(ValidationError):
QuestionLockedValidator(question)({
'questionset': question.questionset,
'locked': False
})
def test_update_parent_parent_parent_error(db):
question = Question.objects.first()
question.questionset.section.catalog.locked = True
question.questionset.section.catalog.save()
with pytest.raises(ValidationError):
QuestionLockedValidator(question)({
'questionset': question.questionset,
'locked': False
})
def test_update_lock(db):
question = Question.objects.first()
QuestionLockedValidator(question)({
'questionset': question.questionset,
'locked': True
})
def test_update_unlock(db):
question = Question.objects.first()
question.locked = True
question.save()
QuestionLockedValidator(question)({
'questionset': question.questionset,
'locked': False
})
def test_serializer_create(db):
validator = QuestionLockedValidator()
validator.set_context(QuestionSerializer())
validator({
'questionset': QuestionSet.objects.first(),
'locked': False
})
def test_serializer_create_locked(db):
validator = QuestionLockedValidator()
validator.set_context(QuestionSerializer())
validator({
'questionset': QuestionSet.objects.first(),
'locked': True
})
def test_serializer_update(db):
question = Question.objects.first()
validator = QuestionLockedValidator()
validator.set_context(QuestionSerializer(instance=question))
validator({
'questionset': question.questionset,
'locked': False
})
def test_serializer_update_error(db):
question = Question.objects.first()
question.locked = True
question.save()
validator = QuestionLockedValidator()
validator.set_context(QuestionSerializer(instance=question))
with pytest.raises(RestFameworkValidationError):
validator({
'questionset': question.questionset,
'locked': True
})
def test_serializer_update_parent_error(db):
question = Question.objects.first()
question.questionset.locked = True
question.questionset.save()
validator = QuestionLockedValidator()
validator.set_context(QuestionSerializer(instance=question))
with pytest.raises(RestFameworkValidationError):
validator({
'questionset': question.questionset,
'locked': True
})
def test_serializer_update_parent_parent_error(db):
question = Question.objects.first()
question.questionset.section.locked = True
question.questionset.section.save()
validator = QuestionLockedValidator()
validator.set_context(QuestionSerializer(instance=question))
with pytest.raises(RestFameworkValidationError):
validator({
'questionset': question.questionset,
'locked': True
})
def test_serializer_update_parent_parent_parent_error(db):
question = Question.objects.first()
question.questionset.section.catalog.locked = True
question.questionset.section.catalog.save()
validator = QuestionLockedValidator()
validator.set_context(QuestionSerializer(instance=question))
with pytest.raises(RestFameworkValidationError):
validator({
'questionset': question.questionset,
'locked': True
})
def test_serializer_update_lock(db):
question = Question.objects.first()
validator = QuestionLockedValidator()
validator.set_context(QuestionSerializer(instance=question))
validator({
'questionset': question.questionset,
'locked': True
})
def test_serializer_update_unlock(db):
question = Question.objects.first()
question.locked = True
question.save()
validator = QuestionLockedValidator()
validator.set_context(QuestionSerializer(instance=question))
validator({
'questionset': question.questionset,
'locked': False
})
| 25.600917
| 64
| 0.689124
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 378
| 0.06773
|
dd79ddb1ceb718cc4031b1942143fae9a88b3147
| 80,373
|
py
|
Python
|
samtranslator/model/sam_resources.py
|
aaythapa/serverless-application-model
|
1029548db5787845d1a321d7c182aeb3716933ba
|
[
"Apache-2.0"
] | null | null | null |
samtranslator/model/sam_resources.py
|
aaythapa/serverless-application-model
|
1029548db5787845d1a321d7c182aeb3716933ba
|
[
"Apache-2.0"
] | null | null | null |
samtranslator/model/sam_resources.py
|
aaythapa/serverless-application-model
|
1029548db5787845d1a321d7c182aeb3716933ba
|
[
"Apache-2.0"
] | null | null | null |
""" SAM macro definitions """
from six import string_types
import copy
import uuid
import samtranslator.model.eventsources
import samtranslator.model.eventsources.pull
import samtranslator.model.eventsources.push
import samtranslator.model.eventsources.cloudwatchlogs
from .api.api_generator import ApiGenerator
from .api.http_api_generator import HttpApiGenerator
from .packagetype import ZIP, IMAGE
from .s3_utils.uri_parser import construct_s3_location_object, construct_image_code_object
from .tags.resource_tagging import get_tag_list
from samtranslator.model import PropertyType, SamResourceMacro, ResourceTypeResolver
from samtranslator.model.apigateway import (
ApiGatewayDeployment,
ApiGatewayStage,
ApiGatewayDomainName,
ApiGatewayUsagePlan,
ApiGatewayUsagePlanKey,
ApiGatewayApiKey,
)
from samtranslator.model.apigatewayv2 import ApiGatewayV2Stage, ApiGatewayV2DomainName
from samtranslator.model.cloudformation import NestedStack
from samtranslator.model.s3 import S3Bucket
from samtranslator.model.cloudwatch import SyntheticsCanary, CloudWatchAlarm
from samtranslator.model.dynamodb import DynamoDBTable
from samtranslator.model.exceptions import InvalidEventException, InvalidResourceException
from samtranslator.model.resource_policies import ResourcePolicies, PolicyTypes
from samtranslator.model.iam import IAMRole, IAMRolePolicies
from samtranslator.model.lambda_ import (
LambdaFunction,
LambdaVersion,
LambdaAlias,
LambdaLayerVersion,
LambdaEventInvokeConfig,
)
from samtranslator.model.types import dict_of, is_str, is_type, list_of, one_of, any_type
from samtranslator.translator import logical_id_generator
from samtranslator.translator.arn_generator import ArnGenerator
from samtranslator.model.intrinsics import (
is_intrinsic_if,
is_intrinsic_no_value,
ref,
make_not_conditional,
make_conditional,
make_and_condition,
)
from samtranslator.model.sqs import SQSQueue
from samtranslator.model.sns import SNSTopic
from samtranslator.model.stepfunctions import StateMachineGenerator
from samtranslator.model.role_utils import construct_role_for_resource
from samtranslator.model.xray_utils import get_xray_managed_policy_name
# len(prefix) + MAX_CANARY_LOGICAL_ID_LENGTH + MAX_CANARY_UNIQUE_ID_LENGTH + 1 (extra '-' char added) must be less
# than or equal to 21
# https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-synthetics-canary.html#cfn-synthetics-canary-name
MAX_CANARY_LOGICAL_ID_LENGTH = 11
MAX_CANARY_UNIQUE_ID_LENGTH = 5
CANARY_NAME_PREFIX = "sam-"
# The default values for ComparisonOperator, Threshold and Period based on the MetricName provided by the user
# These default values were acquired from the Create Canary page in the Synthetics Canary dashboard
# https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch_Synthetics_Canaries_metrics.html
DEFAULT_METRIC_VALUES = {
"SuccessPercent": {"ComparisonOperator": "LessThanThreshold", "Threshold": 90, "Period": 300},
"Failed": {"ComparisonOperator": "GreaterThanOrEqualToThreshold", "Threshold": 1, "Period": 300},
"Duration": {"ComparisonOperator": "GreaterThanThreshold", "Threshold": 30000, "Period": 900},
}
# the main metrics produced by Synthetics Canary
# sorted for backwards compatibility (python 2.7 automatically sorts keys)
VALID_CANARY_METRICS = list(sorted(DEFAULT_METRIC_VALUES))
class SamFunction(SamResourceMacro):
"""SAM function macro."""
resource_type = "AWS::Serverless::Function"
property_types = {
"FunctionName": PropertyType(False, one_of(is_str(), is_type(dict))),
"Handler": PropertyType(False, is_str()),
"Runtime": PropertyType(False, is_str()),
"CodeUri": PropertyType(False, one_of(is_str(), is_type(dict))),
"ImageUri": PropertyType(False, is_str()),
"PackageType": PropertyType(False, is_str()),
"InlineCode": PropertyType(False, one_of(is_str(), is_type(dict))),
"DeadLetterQueue": PropertyType(False, is_type(dict)),
"Description": PropertyType(False, is_str()),
"MemorySize": PropertyType(False, is_type(int)),
"Timeout": PropertyType(False, is_type(int)),
"VpcConfig": PropertyType(False, is_type(dict)),
"Role": PropertyType(False, is_str()),
"AssumeRolePolicyDocument": PropertyType(False, is_type(dict)),
"Policies": PropertyType(False, one_of(is_str(), is_type(dict), list_of(one_of(is_str(), is_type(dict))))),
"PermissionsBoundary": PropertyType(False, is_str()),
"Environment": PropertyType(False, dict_of(is_str(), is_type(dict))),
"Events": PropertyType(False, dict_of(is_str(), is_type(dict))),
"Tags": PropertyType(False, is_type(dict)),
"Tracing": PropertyType(False, one_of(is_type(dict), is_str())),
"KmsKeyArn": PropertyType(False, one_of(is_type(dict), is_str())),
"DeploymentPreference": PropertyType(False, is_type(dict)),
"ReservedConcurrentExecutions": PropertyType(False, any_type()),
"Layers": PropertyType(False, list_of(one_of(is_str(), is_type(dict)))),
"EventInvokeConfig": PropertyType(False, is_type(dict)),
# Intrinsic functions in value of Alias property are not supported, yet
"AutoPublishAlias": PropertyType(False, one_of(is_str())),
"AutoPublishCodeSha256": PropertyType(False, one_of(is_str())),
"VersionDescription": PropertyType(False, is_str()),
"ProvisionedConcurrencyConfig": PropertyType(False, is_type(dict)),
"FileSystemConfigs": PropertyType(False, list_of(is_type(dict))),
"ImageConfig": PropertyType(False, is_type(dict)),
"CodeSigningConfigArn": PropertyType(False, is_str()),
}
event_resolver = ResourceTypeResolver(
samtranslator.model.eventsources,
samtranslator.model.eventsources.pull,
samtranslator.model.eventsources.push,
samtranslator.model.eventsources.cloudwatchlogs,
)
# DeadLetterQueue
dead_letter_queue_policy_actions = {"SQS": "sqs:SendMessage", "SNS": "sns:Publish"}
#
# Conditions
conditions = {}
# Customers can refer to the following properties of SAM function
referable_properties = {
"Alias": LambdaAlias.resource_type,
"Version": LambdaVersion.resource_type,
# EventConfig auto created SQS and SNS
"DestinationTopic": SNSTopic.resource_type,
"DestinationQueue": SQSQueue.resource_type,
}
def resources_to_link(self, resources):
try:
return {"event_resources": self._event_resources_to_link(resources)}
except InvalidEventException as e:
raise InvalidResourceException(self.logical_id, e.message)
def to_cloudformation(self, **kwargs):
"""Returns the Lambda function, role, and event resources to which this SAM Function corresponds.
:param dict kwargs: already-converted resources that may need to be modified when converting this \
macro to pure CloudFormation
:returns: a list of vanilla CloudFormation Resources, to which this Function expands
:rtype: list
"""
resources = []
intrinsics_resolver = kwargs["intrinsics_resolver"]
mappings_resolver = kwargs.get("mappings_resolver", None)
conditions = kwargs.get("conditions", {})
if self.DeadLetterQueue:
self._validate_dlq()
lambda_function = self._construct_lambda_function()
resources.append(lambda_function)
if self.ProvisionedConcurrencyConfig:
if not self.AutoPublishAlias:
raise InvalidResourceException(
self.logical_id,
"To set ProvisionedConcurrencyConfig " "AutoPublishALias must be defined on the function",
)
lambda_alias = None
alias_name = ""
if self.AutoPublishAlias:
alias_name = self._get_resolved_alias_name("AutoPublishAlias", self.AutoPublishAlias, intrinsics_resolver)
code_sha256 = None
if self.AutoPublishCodeSha256:
code_sha256 = intrinsics_resolver.resolve_parameter_refs(self.AutoPublishCodeSha256)
lambda_version = self._construct_version(
lambda_function, intrinsics_resolver=intrinsics_resolver, code_sha256=code_sha256
)
lambda_alias = self._construct_alias(alias_name, lambda_function, lambda_version)
resources.append(lambda_version)
resources.append(lambda_alias)
if self.DeploymentPreference:
self._validate_deployment_preference_and_add_update_policy(
kwargs.get("deployment_preference_collection", None),
lambda_alias,
intrinsics_resolver,
mappings_resolver,
)
event_invoke_policies = []
if self.EventInvokeConfig:
function_name = lambda_function.logical_id
event_invoke_resources, event_invoke_policies = self._construct_event_invoke_config(
function_name, alias_name, lambda_alias, intrinsics_resolver, conditions
)
resources.extend(event_invoke_resources)
managed_policy_map = kwargs.get("managed_policy_map", {})
if not managed_policy_map:
raise Exception("Managed policy map is empty, but should not be.")
execution_role = None
if lambda_function.Role is None:
execution_role = self._construct_role(managed_policy_map, event_invoke_policies)
lambda_function.Role = execution_role.get_runtime_attr("arn")
resources.append(execution_role)
try:
resources += self._generate_event_resources(
lambda_function,
execution_role,
kwargs["event_resources"],
intrinsics_resolver,
lambda_alias=lambda_alias,
)
except InvalidEventException as e:
raise InvalidResourceException(self.logical_id, e.message)
return resources
def _construct_event_invoke_config(self, function_name, alias_name, lambda_alias, intrinsics_resolver, conditions):
"""
Create a `AWS::Lambda::EventInvokeConfig` based on the input dict `EventInvokeConfig`
"""
resources = []
policy_document = []
# Try to resolve.
resolved_event_invoke_config = intrinsics_resolver.resolve_parameter_refs(self.EventInvokeConfig)
logical_id = "{id}EventInvokeConfig".format(id=function_name)
if lambda_alias:
lambda_event_invoke_config = LambdaEventInvokeConfig(
logical_id=logical_id, depends_on=[lambda_alias.logical_id], attributes=self.resource_attributes
)
else:
lambda_event_invoke_config = LambdaEventInvokeConfig(
logical_id=logical_id, attributes=self.resource_attributes
)
dest_config = {}
input_dest_config = resolved_event_invoke_config.get("DestinationConfig")
if input_dest_config and input_dest_config.get("OnSuccess") is not None:
resource, on_success, policy = self._validate_and_inject_resource(
input_dest_config.get("OnSuccess"), "OnSuccess", logical_id, conditions
)
dest_config["OnSuccess"] = on_success
self.EventInvokeConfig["DestinationConfig"]["OnSuccess"]["Destination"] = on_success.get("Destination")
if resource is not None:
resources.extend([resource])
if policy is not None:
policy_document.append(policy)
if input_dest_config and input_dest_config.get("OnFailure") is not None:
resource, on_failure, policy = self._validate_and_inject_resource(
input_dest_config.get("OnFailure"), "OnFailure", logical_id, conditions
)
dest_config["OnFailure"] = on_failure
self.EventInvokeConfig["DestinationConfig"]["OnFailure"]["Destination"] = on_failure.get("Destination")
if resource is not None:
resources.extend([resource])
if policy is not None:
policy_document.append(policy)
lambda_event_invoke_config.FunctionName = ref(function_name)
if alias_name:
lambda_event_invoke_config.Qualifier = alias_name
else:
lambda_event_invoke_config.Qualifier = "$LATEST"
lambda_event_invoke_config.DestinationConfig = dest_config
lambda_event_invoke_config.MaximumEventAgeInSeconds = resolved_event_invoke_config.get(
"MaximumEventAgeInSeconds"
)
lambda_event_invoke_config.MaximumRetryAttempts = resolved_event_invoke_config.get("MaximumRetryAttempts")
resources.extend([lambda_event_invoke_config])
return resources, policy_document
def _validate_and_inject_resource(self, dest_config, event, logical_id, conditions):
"""
        For Event Invoke Config, if the user has not specified a destination ARN for SQS/SNS, SAM
        auto-creates an SQS or SNS resource with defaults. Intrinsics are supported in the Destination
        ARN property, so to handle conditional ifs we have to inject the conditions into the auto-created
        SQS/SNS resources as well as into the policy documents.
"""
accepted_types_list = ["SQS", "SNS", "EventBridge", "Lambda"]
auto_inject_list = ["SQS", "SNS"]
resource = None
policy = {}
destination = {}
destination["Destination"] = dest_config.get("Destination")
resource_logical_id = logical_id + event
if dest_config.get("Type") is None or dest_config.get("Type") not in accepted_types_list:
raise InvalidResourceException(
self.logical_id, "'Type: {}' must be one of {}".format(dest_config.get("Type"), accepted_types_list)
)
property_condition, dest_arn = self._get_or_make_condition(
dest_config.get("Destination"), logical_id, conditions
)
if dest_config.get("Destination") is None or property_condition is not None:
combined_condition = self._make_and_conditions(
self.get_passthrough_resource_attributes().get("Condition"), property_condition, conditions
)
if dest_config.get("Type") in auto_inject_list:
if dest_config.get("Type") == "SQS":
resource = SQSQueue(
resource_logical_id + "Queue", attributes=self.get_passthrough_resource_attributes()
)
if dest_config.get("Type") == "SNS":
resource = SNSTopic(
resource_logical_id + "Topic", attributes=self.get_passthrough_resource_attributes()
)
if combined_condition:
resource.set_resource_attribute("Condition", combined_condition)
if property_condition:
destination["Destination"] = make_conditional(
property_condition, resource.get_runtime_attr("arn"), dest_arn
)
else:
destination["Destination"] = resource.get_runtime_attr("arn")
policy = self._add_event_invoke_managed_policy(
dest_config, resource_logical_id, property_condition, destination["Destination"]
)
else:
raise InvalidResourceException(
self.logical_id, "Destination is required if Type is not {}".format(auto_inject_list)
)
if dest_config.get("Destination") is not None and property_condition is None:
policy = self._add_event_invoke_managed_policy(
dest_config, resource_logical_id, None, dest_config.get("Destination")
)
return resource, destination, policy
def _make_and_conditions(self, resource_condition, property_condition, conditions):
if resource_condition is None:
return property_condition
if property_condition is None:
return resource_condition
and_condition = make_and_condition([{"Condition": resource_condition}, {"Condition": property_condition}])
condition_name = self._make_gen_condition_name(resource_condition + "AND" + property_condition, self.logical_id)
conditions[condition_name] = and_condition
return condition_name
def _get_or_make_condition(self, destination, logical_id, conditions):
"""
This method checks if there is an If condition on Destination property. Since we auto create
SQS and SNS if the destination ARN is not provided, we need to make sure that If condition
is handled here.
True case: Only create the Queue/Topic if the condition is true
Destination: !If [SomeCondition, {Ref: AWS::NoValue}, queue-arn]
False case : Only create the Queue/Topic if the condition is false.
Destination: !If [SomeCondition, queue-arn, {Ref: AWS::NoValue}]
For the false case, we need to add a new condition that negates the existing condition, and
add that to the top-level Conditions.
"""
if destination is None:
return None, None
if is_intrinsic_if(destination):
dest_list = destination.get("Fn::If")
if is_intrinsic_no_value(dest_list[1]) and is_intrinsic_no_value(dest_list[2]):
return None, None
if is_intrinsic_no_value(dest_list[1]):
return dest_list[0], dest_list[2]
if is_intrinsic_no_value(dest_list[2]):
condition = dest_list[0]
not_condition = self._make_gen_condition_name("NOT" + condition, logical_id)
conditions[not_condition] = make_not_conditional(condition)
return not_condition, dest_list[1]
return None, None
def _make_gen_condition_name(self, name, hash_input):
# Make sure the property name is not over 255 characters (CFN limit)
hash_digest = logical_id_generator.LogicalIdGenerator("", hash_input).gen()
condition_name = name + hash_digest
if len(condition_name) > 255:
            return condition_name[:255]
return condition_name
def _get_resolved_alias_name(self, property_name, original_alias_value, intrinsics_resolver):
"""
Alias names can be supplied as an intrinsic function. This method tries to extract alias name from a reference
to a parameter. If it cannot completely resolve (ie. if a complex intrinsic function was used), then this
method raises an exception. If alias name is just a plain string, it will return as is
:param dict or string original_alias_value: Value of Alias property as provided by the customer
:param samtranslator.intrinsics.resolver.IntrinsicsResolver intrinsics_resolver: Instance of the resolver that
knows how to resolve parameter references
:return string: Alias name
:raises InvalidResourceException: If the value is a complex intrinsic function that cannot be resolved
"""
# Try to resolve.
resolved_alias_name = intrinsics_resolver.resolve_parameter_refs(original_alias_value)
if not isinstance(resolved_alias_name, string_types):
# This is still a dictionary which means we are not able to completely resolve intrinsics
raise InvalidResourceException(
self.logical_id, "'{}' must be a string or a Ref to a template parameter".format(property_name)
)
return resolved_alias_name
def _construct_lambda_function(self):
"""Constructs and returns the Lambda function.
:returns: a list containing the Lambda function and execution role resources
:rtype: list
"""
lambda_function = LambdaFunction(
self.logical_id, depends_on=self.depends_on, attributes=self.resource_attributes
)
if self.FunctionName:
lambda_function.FunctionName = self.FunctionName
lambda_function.Handler = self.Handler
lambda_function.Runtime = self.Runtime
lambda_function.Description = self.Description
lambda_function.MemorySize = self.MemorySize
lambda_function.Timeout = self.Timeout
lambda_function.VpcConfig = self.VpcConfig
lambda_function.Role = self.Role
lambda_function.Environment = self.Environment
lambda_function.Code = self._construct_code_dict()
lambda_function.KmsKeyArn = self.KmsKeyArn
lambda_function.ReservedConcurrentExecutions = self.ReservedConcurrentExecutions
lambda_function.Tags = self._construct_tag_list(self.Tags)
lambda_function.Layers = self.Layers
lambda_function.FileSystemConfigs = self.FileSystemConfigs
lambda_function.ImageConfig = self.ImageConfig
lambda_function.PackageType = self.PackageType
if self.Tracing:
lambda_function.TracingConfig = {"Mode": self.Tracing}
if self.DeadLetterQueue:
lambda_function.DeadLetterConfig = {"TargetArn": self.DeadLetterQueue["TargetArn"]}
lambda_function.CodeSigningConfigArn = self.CodeSigningConfigArn
self._validate_package_type(lambda_function)
return lambda_function
def _add_event_invoke_managed_policy(self, dest_config, logical_id, condition, dest_arn):
policy = {}
if dest_config and dest_config.get("Type"):
if dest_config.get("Type") == "SQS":
policy = IAMRolePolicies.sqs_send_message_role_policy(dest_arn, logical_id)
if dest_config.get("Type") == "SNS":
policy = IAMRolePolicies.sns_publish_role_policy(dest_arn, logical_id)
# Event Bridge and Lambda Arns are passthrough.
if dest_config.get("Type") == "EventBridge":
policy = IAMRolePolicies.event_bus_put_events_role_policy(dest_arn, logical_id)
if dest_config.get("Type") == "Lambda":
policy = IAMRolePolicies.lambda_invoke_function_role_policy(dest_arn, logical_id)
return policy
def _construct_role(self, managed_policy_map, event_invoke_policies):
"""Constructs a Lambda execution role based on this SAM function's Policies property.
:returns: the generated IAM Role
:rtype: model.iam.IAMRole
"""
role_attributes = self.get_passthrough_resource_attributes()
if self.AssumeRolePolicyDocument is not None:
assume_role_policy_document = self.AssumeRolePolicyDocument
else:
assume_role_policy_document = IAMRolePolicies.lambda_assume_role_policy()
managed_policy_arns = [ArnGenerator.generate_aws_managed_policy_arn("service-role/AWSLambdaBasicExecutionRole")]
if self.Tracing:
managed_policy_name = get_xray_managed_policy_name()
managed_policy_arns.append(ArnGenerator.generate_aws_managed_policy_arn(managed_policy_name))
if self.VpcConfig:
managed_policy_arns.append(
ArnGenerator.generate_aws_managed_policy_arn("service-role/AWSLambdaVPCAccessExecutionRole")
)
function_policies = ResourcePolicies(
{"Policies": self.Policies},
# No support for policy templates in the "core"
policy_template_processor=None,
)
policy_documents = []
if self.DeadLetterQueue:
policy_documents.append(
IAMRolePolicies.dead_letter_queue_policy(
self.dead_letter_queue_policy_actions[self.DeadLetterQueue["Type"]],
self.DeadLetterQueue["TargetArn"],
)
)
if self.EventInvokeConfig:
if event_invoke_policies is not None:
policy_documents.extend(event_invoke_policies)
execution_role = construct_role_for_resource(
resource_logical_id=self.logical_id,
attributes=role_attributes,
managed_policy_map=managed_policy_map,
assume_role_policy_document=assume_role_policy_document,
resource_policies=function_policies,
managed_policy_arns=managed_policy_arns,
policy_documents=policy_documents,
permissions_boundary=self.PermissionsBoundary,
tags=self._construct_tag_list(self.Tags),
)
return execution_role
def _validate_package_type(self, lambda_function):
"""
Validates Function based on the existence of Package type
"""
packagetype = lambda_function.PackageType or ZIP
if packagetype not in [ZIP, IMAGE]:
raise InvalidResourceException(
lambda_function.logical_id,
"PackageType needs to be `{zip}` or `{image}`".format(zip=ZIP, image=IMAGE),
)
def _validate_package_type_zip():
if not all([lambda_function.Runtime, lambda_function.Handler]):
raise InvalidResourceException(
lambda_function.logical_id,
"Runtime and Handler needs to be present when PackageType is of type `{zip}`".format(zip=ZIP),
)
if any([lambda_function.Code.get("ImageUri", False), lambda_function.ImageConfig]):
raise InvalidResourceException(
lambda_function.logical_id,
"ImageUri or ImageConfig cannot be present when PackageType is of type `{zip}`".format(zip=ZIP),
)
def _validate_package_type_image():
if any([lambda_function.Handler, lambda_function.Runtime, lambda_function.Layers]):
raise InvalidResourceException(
lambda_function.logical_id,
"Runtime, Handler, Layers cannot be present when PackageType is of type `{image}`".format(
image=IMAGE
),
)
if not lambda_function.Code.get("ImageUri"):
raise InvalidResourceException(
lambda_function.logical_id,
"ImageUri needs to be present when PackageType is of type `{image}`".format(image=IMAGE),
)
_validate_per_package_type = {ZIP: _validate_package_type_zip, IMAGE: _validate_package_type_image}
# Call appropriate validation function based on the package type.
return _validate_per_package_type[packagetype]()
def _validate_dlq(self):
"""Validates whether the DeadLetterQueue LogicalId is validation
:raise: InvalidResourceException
"""
# Validate required logical ids
valid_dlq_types = str(list(self.dead_letter_queue_policy_actions.keys()))
if not self.DeadLetterQueue.get("Type") or not self.DeadLetterQueue.get("TargetArn"):
raise InvalidResourceException(
self.logical_id,
"'DeadLetterQueue' requires Type and TargetArn properties to be specified.".format(valid_dlq_types),
)
# Validate required Types
if not self.DeadLetterQueue["Type"] in self.dead_letter_queue_policy_actions:
raise InvalidResourceException(
self.logical_id, "'DeadLetterQueue' requires Type of {}".format(valid_dlq_types)
)
def _event_resources_to_link(self, resources):
event_resources = {}
if self.Events:
for logical_id, event_dict in self.Events.items():
try:
event_source = self.event_resolver.resolve_resource_type(event_dict).from_dict(
self.logical_id + logical_id, event_dict, logical_id
)
except (TypeError, AttributeError) as e:
raise InvalidEventException(logical_id, "{}".format(e))
event_resources[logical_id] = event_source.resources_to_link(resources)
return event_resources
@staticmethod
def order_events(event):
"""
Helper method for sorting Function Events. Returns a key to use in sorting this event
This is mainly used for HttpApi Events, where we need to evaluate the "$default" path (if any)
before we evaluate any of the other paths ("/", etc), so we can make sure we don't create any
redundant permissions. This sort places "$" before "/" or any alphanumeric characters.
:param event: tuple of (logical_id, event_dictionary) that contains event information
"""
logical_id, event_dict = event
if not isinstance(event_dict, dict):
return logical_id
return event_dict.get("Properties", {}).get("Path", logical_id)
def _generate_event_resources(
self, lambda_function, execution_role, event_resources, intrinsics_resolver, lambda_alias=None
):
"""Generates and returns the resources associated with this function's events.
:param model.lambda_.LambdaFunction lambda_function: generated Lambda function
:param iam.IAMRole execution_role: generated Lambda execution role
:param implicit_api: Global Implicit API resource where the implicit APIs get attached to, if necessary
:param implicit_api_stage: Global implicit API stage resource where implicit APIs get attached to, if necessary
:param event_resources: All the event sources associated with this Lambda function
:param model.lambda_.LambdaAlias lambda_alias: Optional Lambda Alias resource if we want to connect the
event sources to this alias
:returns: a list containing the function's event resources
:rtype: list
"""
resources = []
if self.Events:
for logical_id, event_dict in sorted(self.Events.items(), key=SamFunction.order_events):
try:
eventsource = self.event_resolver.resolve_resource_type(event_dict).from_dict(
lambda_function.logical_id + logical_id, event_dict, logical_id
)
except TypeError as e:
raise InvalidEventException(logical_id, "{}".format(e))
kwargs = {
# When Alias is provided, connect all event sources to the alias and *not* the function
"function": lambda_alias or lambda_function,
"role": execution_role,
"intrinsics_resolver": intrinsics_resolver,
}
for name, resource in event_resources[logical_id].items():
kwargs[name] = resource
resources += eventsource.to_cloudformation(**kwargs)
return resources
def _construct_code_dict(self):
"""Constructs Lambda Code Dictionary based on the accepted SAM artifact properties such
as `InlineCode`, `CodeUri` and `ImageUri` and also raises errors if more than one of them is
defined. `PackageType` determines which artifacts are considered.
:raises InvalidResourceException when conditions on the SAM artifact properties are not met.
"""
# list of accepted artifacts
packagetype = self.PackageType or ZIP
artifacts = {}
if packagetype == ZIP:
artifacts = {"InlineCode": self.InlineCode, "CodeUri": self.CodeUri}
elif packagetype == IMAGE:
artifacts = {"ImageUri": self.ImageUri}
if packagetype not in [ZIP, IMAGE]:
raise InvalidResourceException(self.logical_id, "invalid 'PackageType' : {}".format(packagetype))
# Inline function for transformation of inline code.
        # It accepts arbitrary arguments, because the arguments do not matter for the result.
def _construct_inline_code(*args, **kwargs):
return {"ZipFile": self.InlineCode}
# dispatch mechanism per artifact on how it needs to be transformed.
artifact_dispatch = {
"InlineCode": _construct_inline_code,
"CodeUri": construct_s3_location_object,
"ImageUri": construct_image_code_object,
}
        filtered_artifacts = dict(filter(lambda x: x[1] is not None, artifacts.items()))
# There are more than one allowed artifact types present, raise an Error.
# There are no valid artifact types present, also raise an Error.
if len(filtered_artifacts) > 1 or len(filtered_artifacts) == 0:
if packagetype == ZIP and len(filtered_artifacts) == 0:
raise InvalidResourceException(self.logical_id, "Only one of 'InlineCode' or 'CodeUri' can be set.")
elif packagetype == IMAGE:
raise InvalidResourceException(self.logical_id, "'ImageUri' must be set.")
filtered_keys = [key for key in filtered_artifacts.keys()]
        # NOTE(sriram-mv): This precedence order is important. It protects against python2 vs python3
# dictionary ordering when getting the key values with .keys() on a dictionary.
# Do not change this precedence order.
if "InlineCode" in filtered_keys:
filtered_key = "InlineCode"
elif "CodeUri" in filtered_keys:
filtered_key = "CodeUri"
elif "ImageUri" in filtered_keys:
filtered_key = "ImageUri"
else:
raise InvalidResourceException(self.logical_id, "Either 'InlineCode' or 'CodeUri' must be set.")
dispatch_function = artifact_dispatch[filtered_key]
return dispatch_function(artifacts[filtered_key], self.logical_id, filtered_key)
def _construct_version(self, function, intrinsics_resolver, code_sha256=None):
"""Constructs a Lambda Version resource that will be auto-published when CodeUri of the function changes.
Old versions will not be deleted without a direct reference from the CloudFormation template.
:param model.lambda_.LambdaFunction function: Lambda function object that is being connected to a version
:param model.intrinsics.resolver.IntrinsicsResolver intrinsics_resolver: Class that can help resolve
references to parameters present in CodeUri. It is a common usecase to set S3Key of Code to be a
template parameter. Need to resolve the values otherwise we will never detect a change in Code dict
:param str code_sha256: User predefined hash of the Lambda function code
:return: Lambda function Version resource
"""
code_dict = function.Code
if not code_dict:
raise ValueError("Lambda function code must be a valid non-empty dictionary")
if not intrinsics_resolver:
raise ValueError("intrinsics_resolver is required for versions creation")
# Resolve references to template parameters before creating hash. This will *not* resolve all intrinsics
# because we cannot resolve runtime values like Arn of a resource. For purposes of detecting changes, this
# is good enough. Here is why:
#
        # When using intrinsic functions there are two cases when the hash must change:
# - Value of the template parameter changes
# - (or) LogicalId of a referenced resource changes ie. !GetAtt NewResource.Arn
#
        # The latter case will already change the hash because some value in the Code dictionary changes. We handle the
# first case by resolving references to template parameters. It is okay even if these references are
# present inside another intrinsic such as !Join. The resolver will replace the reference with the parameter's
# value and keep all other parts of !Join identical. This will still trigger a change in the hash.
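        # For example (hypothetical Code dict): {"S3Bucket": "my-bucket", "S3Key": {"Ref": "CodeKeyParam"}}
        # becomes {"S3Bucket": "my-bucket", "S3Key": "app-v2.zip"} once the parameter reference resolves,
        # which changes the hash and therefore the generated version LogicalId.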
code_dict = intrinsics_resolver.resolve_parameter_refs(code_dict)
# Construct the LogicalID of Lambda version by appending 10 characters of SHA of CodeUri. This is necessary
# to trigger creation of a new version every time code location changes. Since logicalId changes, CloudFormation
# will drop the old version and create a new one for us. We set a DeletionPolicy on the version resource to
# prevent CloudFormation from actually deleting the underlying version resource
#
        # SHA Collisions: For purposes of triggering a new update, we are concerned only with the difference
        # between the previous and next hashes. The chances that two subsequent hashes collide are fairly low.
prefix = "{id}Version".format(id=self.logical_id)
logical_dict = {}
try:
logical_dict = code_dict.copy()
except (AttributeError, UnboundLocalError):
pass
else:
if function.Environment:
logical_dict.update(function.Environment)
if function.MemorySize:
logical_dict.update({"MemorySize": function.MemorySize})
logical_id = logical_id_generator.LogicalIdGenerator(prefix, logical_dict, code_sha256).gen()
attributes = self.get_passthrough_resource_attributes()
if attributes is None:
attributes = {}
if "DeletionPolicy" not in attributes:
attributes["DeletionPolicy"] = "Retain"
lambda_version = LambdaVersion(logical_id=logical_id, attributes=attributes)
lambda_version.FunctionName = function.get_runtime_attr("name")
lambda_version.Description = self.VersionDescription
return lambda_version
def _construct_alias(self, name, function, version):
"""Constructs a Lambda Alias for the given function and pointing to the given version
:param string name: Name of the alias
:param model.lambda_.LambdaFunction function: Lambda function object to associate the alias with
:param model.lambda_.LambdaVersion version: Lambda version object to associate the alias with
:return: Lambda alias object
:rtype model.lambda_.LambdaAlias
"""
if not name:
raise InvalidResourceException(self.logical_id, "Alias name is required to create an alias")
logical_id = "{id}Alias{suffix}".format(id=function.logical_id, suffix=name)
alias = LambdaAlias(logical_id=logical_id, attributes=self.get_passthrough_resource_attributes())
alias.Name = name
alias.FunctionName = function.get_runtime_attr("name")
alias.FunctionVersion = version.get_runtime_attr("version")
if self.ProvisionedConcurrencyConfig:
alias.ProvisionedConcurrencyConfig = self.ProvisionedConcurrencyConfig
return alias
def _validate_deployment_preference_and_add_update_policy(
self, deployment_preference_collection, lambda_alias, intrinsics_resolver, mappings_resolver
):
if "Enabled" in self.DeploymentPreference:
# resolve intrinsics and mappings for Type
enabled = self.DeploymentPreference["Enabled"]
enabled = intrinsics_resolver.resolve_parameter_refs(enabled)
enabled = mappings_resolver.resolve_parameter_refs(enabled)
self.DeploymentPreference["Enabled"] = enabled
if "Type" in self.DeploymentPreference:
# resolve intrinsics and mappings for Type
preference_type = self.DeploymentPreference["Type"]
preference_type = intrinsics_resolver.resolve_parameter_refs(preference_type)
preference_type = mappings_resolver.resolve_parameter_refs(preference_type)
self.DeploymentPreference["Type"] = preference_type
if deployment_preference_collection is None:
raise ValueError("deployment_preference_collection required for parsing the deployment preference")
deployment_preference_collection.add(self.logical_id, self.DeploymentPreference)
if deployment_preference_collection.get(self.logical_id).enabled:
if self.AutoPublishAlias is None:
raise InvalidResourceException(
self.logical_id, "'DeploymentPreference' requires AutoPublishAlias property to be specified."
)
if lambda_alias is None:
raise ValueError("lambda_alias expected for updating it with the appropriate update policy")
lambda_alias.set_resource_attribute(
"UpdatePolicy", deployment_preference_collection.update_policy(self.logical_id).to_dict()
)
class SamCanary(SamResourceMacro):
"""SAM canary macro."""
resource_type = "AWS::Serverless::Canary"
property_types = {
"FunctionName": PropertyType(False, one_of(is_str(), is_type(dict))),
"Handler": PropertyType(True, is_str()),
"Runtime": PropertyType(True, is_str()),
"CodeUri": PropertyType(False, one_of(is_str(), is_type(dict))),
"InlineCode": PropertyType(False, one_of(is_str(), is_type(dict))),
"MemorySize": PropertyType(False, is_type(int)),
"Tags": PropertyType(False, is_type(dict)),
# Easier to pass through as AWS::Synthetics::Canary only accepts a boolean
"ActiveTracing": PropertyType(False, is_type(bool)),
"AssumeRolePolicyDocument": PropertyType(False, is_type(dict)),
"Timeout": PropertyType(False, is_type(int)),
"Role": PropertyType(False, is_str()),
"Schedule": PropertyType(True, is_type(dict)),
"StartCanaryAfterCreation": PropertyType(True, is_type(bool)),
"ArtifactS3Location": PropertyType(False, one_of(is_type(dict), is_str())),
"FailureRetentionPeriod": PropertyType(False, is_type(int)),
"SuccessRetentionPeriod": PropertyType(False, is_type(int)),
"VpcConfig": PropertyType(False, is_type(dict)),
"Environment": PropertyType(False, dict_of(is_str(), is_type(dict))),
"Policies": PropertyType(False, one_of(is_str(), is_type(dict), list_of(one_of(is_str(), is_type(dict))))),
"CanaryMetricAlarms": PropertyType(False, list_of(is_type(dict))),
}
def to_cloudformation(self, **kwargs):
"""Returns the Synthetics Canary to which this SAM Canary corresponds.
:param dict kwargs: already-converted resources that may need to be modified when converting this \
macro to pure CloudFormation
:returns: a list of vanilla CloudFormation Resources, to which this Serverless Canary expands
:rtype: list
"""
resources = []
managed_policy_map = kwargs.get("managed_policy_map", {})
synthetics_canary = self._construct_synthetics_canary()
resources.append(synthetics_canary)
# A S3 Bucket resource will be added to the transformed template if the user doesn't provide an artifact
# bucket to store canary results
artifact_bucket_name = ""
if not self.ArtifactS3Location:
s3bucket = self._construct_artifact_bucket()
resources.append(s3bucket)
synthetics_canary.ArtifactS3Location = {"Fn::Join": ["", ["s3://", {"Ref": s3bucket.logical_id}]]}
artifact_bucket_name = {"Ref": s3bucket.logical_id}
if not self.Role:
role = self._construct_role(artifact_bucket_name, managed_policy_map)
resources.append(role)
synthetics_canary.ExecutionRoleArn = role.get_runtime_attr("arn")
if self.CanaryMetricAlarms:
self._validate_cloudwatch_alarms()
for alarm_dict in self.CanaryMetricAlarms:
resources.append(self._construct_cloudwatch_alarm(alarm_dict))
return resources
def _validate_cloudwatch_alarms(self):
"""Validates the CanaryMetricAlarms property in Serverless Canary
The property should follow the following structure
CanaryMetricAlarms:
- AlarmName:
MetricName (required): one of the metrics in VALID_CANARY_METRICS
Threshold (optional): any value of type double
ComparisonOperator (optional): any of the valid values (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cw-alarm.html#cfn-cloudwatch-alarms-comparisonoperator)
Period (optional): Integer that is 10, 30, 60, or any multiple of 60
        Note: Alarm names are used as logical ids for their respective CloudWatch::Alarm resources, so if a user
        defines multiple alarms the names must be unique; otherwise the alarms would override each other without
        the user's knowledge.
:raise: InvalidResourceException
"""
# keeps list of alarm names to make sure there are no duplicates
list_of_alarm_names = []
for alarm_dict in self.CanaryMetricAlarms:
# Throw an error if there is more than one alarm in the array index, like for example
# CanaryMetricAlarms:
# - Alarm1:
# MetricName: SuccessPercent
# Alarm2:
# MetricName: SuccessPercent
# Threshold: 90
# - Alarm3:
# MetricName: Failed
            # this throws an error for Alarm2 since Alarm1 is already defined in that dict
if len(alarm_dict) != 1:
raise InvalidResourceException(self.logical_id, "Must have one alarm per array index")
# get the alarm name and the properties the user defined for the alarm
alarm_name = next(iter(alarm_dict))
alarm_item = alarm_dict[alarm_name]
# MetricName property is required
if alarm_item is None or "MetricName" not in alarm_item:
raise InvalidResourceException(
self.logical_id,
"CloudWatch alarm '{key}' is missing required property 'MetricName'.".format(key=alarm_name),
)
metric_name = alarm_item["MetricName"]
# MetricName must be one of the values in VALID_CANARY_METRICS
if metric_name not in VALID_CANARY_METRICS:
raise InvalidResourceException(
self.logical_id,
"MetricName needs to be one of {}".format(VALID_CANARY_METRICS),
)
# make sure all the alarm names are unique
if alarm_name in list_of_alarm_names:
raise InvalidResourceException(self.logical_id, "Duplicate CloudWatch alarm names")
else:
list_of_alarm_names.append(alarm_name)
def _construct_cloudwatch_alarm(self, alarm_dict):
"""Constructs an CloudWatch::Alarm resource if the user specifies the CloudWatchAlarm property in Serverless Canary
:param dict alarm_dict: Alarm name and properties as provided by the customer
:returns: the generated CloudWatch Alarm
:rtype: model.cloudwatch.CloudWatchAlarm
"""
# gets alarm name and the properties defined by user
alarm_name = next(iter(alarm_dict))
alarm_item = alarm_dict[alarm_name]
cloudwatch_alarm = CloudWatchAlarm(
logical_id=alarm_name,
depends_on=self.depends_on,
attributes=self.get_passthrough_resource_attributes(),
)
# default settings for the CloudWatch alarms
# the settings are identical to the Alarms that are made by Synthetics Canary using their dashboard
cloudwatch_alarm.MetricName = alarm_item["MetricName"]
cloudwatch_alarm.Namespace = "CloudWatchSynthetics"
cloudwatch_alarm.EvaluationPeriods = 1
cloudwatch_alarm.Statistic = "Sum"
cloudwatch_alarm.TreatMissingData = "notBreaching"
# connects the alarm to the metric produced by the Synthetics canary from this Serverless resource
cloudwatch_alarm.Dimensions = [{"Name": "CanaryName", "Value": {"Ref": self.logical_id}}]
# set the values if user provides them, if not set them to default value based on the MetricName
cloudwatch_alarm.ComparisonOperator = alarm_item.get(
"ComparisonOperator", DEFAULT_METRIC_VALUES[alarm_item["MetricName"]]["ComparisonOperator"]
)
cloudwatch_alarm.Threshold = float(
alarm_item.get("Threshold", DEFAULT_METRIC_VALUES[alarm_item["MetricName"]]["Threshold"])
)
cloudwatch_alarm.Period = alarm_item.get("Period", DEFAULT_METRIC_VALUES[alarm_item["MetricName"]]["Period"])
return cloudwatch_alarm
def _construct_role(self, artifact_bucket_name, managed_policy_map):
"""Constructs an IAM:Role resource only if user doesn't specify Role property in Serverless Canary
- If the ArtifactS3Location property isn't specified then the the policies to execute the Canary and handle
the resulting data will be added
- If the Tracing property is enabled then the XRay policy based on the user's region will be added
- If the VpcConfig property is specified then the policy to execute VPC will be added
- If the Policies property is specified then the that will be appended to the IAM::Role's Policies property
:returns: the generated IAM Role
:rtype: model.iam.IAMRole
"""
role_attributes = self.get_passthrough_resource_attributes()
if self.AssumeRolePolicyDocument:
assume_role_policy_document = self.AssumeRolePolicyDocument
else:
assume_role_policy_document = IAMRolePolicies.lambda_assume_role_policy()
# add AWS managed policies if user has enabled VpcConfig or Tracing
managed_policy_arns = []
if self.VpcConfig:
managed_policy_arns.append(
ArnGenerator.generate_aws_managed_policy_arn("service-role/AWSLambdaVPCAccessExecutionRole")
)
if self.ActiveTracing is True:
managed_policy_name = get_xray_managed_policy_name()
managed_policy_arns.append(ArnGenerator.generate_aws_managed_policy_arn(managed_policy_name))
# if user has defined Policies property, those policies will be appended to this role
function_policies = ResourcePolicies(
{"Policies": self.Policies},
# No support for policy templates in the "core"
policy_template_processor=None,
)
# The policy to execute the canary is only added to the role if the user hasn't defined ArtifactS3Location
# this emulates CloudWatch Synthetics Canary dashboard's behavior
policy_documents = []
if self.ArtifactS3Location is None:
policy_documents.extend(
(
IAMRolePolicies.canary_put_artifacts_in_s3_policy(
logical_id=self.logical_id, result_bucket=artifact_bucket_name
),
IAMRolePolicies.canary_put_logs_policy(logical_id=self.logical_id),
IAMRolePolicies.canary_put_metric_data_policy(logical_id=self.logical_id),
)
)
execution_role = construct_role_for_resource(
resource_logical_id=self.logical_id,
attributes=role_attributes,
managed_policy_map=managed_policy_map,
assume_role_policy_document=assume_role_policy_document,
resource_policies=function_policies,
managed_policy_arns=managed_policy_arns,
policy_documents=policy_documents,
permissions_boundary=None,
tags=self._construct_tag_list(self.Tags),
)
return execution_role
def _construct_artifact_bucket(self):
"""Constructs a S3Bucket resource to store canary artifacts.
:returns: the generated S3Bucket
:rtype: model.s3.S3Bucket
"""
# Construct the LogicalId of S3Bucket by appending ArtifactBucket to the Canary LogicalId. Once deployed, the
# bucket name will be automatically generated by Cloudformation.
# https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-name.html
logical_id = self.logical_id + "ArtifactBucket"
# Set the DeletionPolicy of the S3 resource to Retain to prevent CloudFormation from trying to delete the
        # bucket when it's not empty. This is necessary because if a user creates and runs a serverless canary without
        # an artifact bucket, then tries to delete/replace that resource, CloudFormation will try to delete the
        # artifact bucket made by SAM, which will throw an error since it's not empty. Retaining the bucket bypasses
# this error.
passthrough_attributes = self.get_passthrough_resource_attributes()
if passthrough_attributes is None:
passthrough_attributes = {}
passthrough_attributes["DeletionPolicy"] = "Retain"
s3bucket = S3Bucket(
logical_id=logical_id,
depends_on=self.depends_on,
attributes=passthrough_attributes,
)
s3bucket.BucketEncryption = {
"ServerSideEncryptionConfiguration": [{"ServerSideEncryptionByDefault": {"SSEAlgorithm": "AES256"}}]
}
s3bucket.Tags = self._construct_tag_list(self.Tags)
return s3bucket
def _construct_synthetics_canary(self):
"""Constructs a AWS::Synthetics::Canary resource."""
canary = SyntheticsCanary(
self.logical_id, depends_on=self.depends_on, attributes=self.get_passthrough_resource_attributes()
)
canary.ArtifactS3Location = self.ArtifactS3Location
canary.Code = self._construct_code_dict
canary.ExecutionRoleArn = self.Role
canary.FailureRetentionPeriod = self.FailureRetentionPeriod
        # constructs a default name if FunctionName isn't provided, because the Synthetics Canary resource requires
        # a Name, which must also be lower case
canary.Name = self.FunctionName if self.FunctionName else self._construct_canary_name()
canary.RuntimeVersion = self.Runtime
canary.Schedule = self.Schedule
canary.StartCanaryAfterCreation = self.StartCanaryAfterCreation
canary.SuccessRetentionPeriod = self.SuccessRetentionPeriod
canary.Tags = self._construct_tag_list(self.Tags)
canary.VPCConfig = self.VpcConfig
if self.ActiveTracing or self.Environment or self.MemorySize or self.Timeout:
canary.RunConfig = self._construct_run_config()
return canary
def _construct_canary_name(self):
"""
Need to construct canary name since the Name property is required in AWS::Synthetics::Canary and CloudFormation
doesn't automatically generate one upon deployment
Synthetics Canary name is limited to 21 characters
https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-synthetics-canary.html#cfn-synthetics-canary-name
len(prefix) + MAX_CANARY_LOGICAL_ID_LENGTH + MAX_CANARY_UNIQUE_ID_LENGTH + 1 (extra '-' char added) must be less
than or equal to 21
"""
logical_id_lowered = self.logical_id.lower()[:MAX_CANARY_LOGICAL_ID_LENGTH] + "-"
suffix = uuid.uuid4().hex[:MAX_CANARY_UNIQUE_ID_LENGTH]
return CANARY_NAME_PREFIX + logical_id_lowered + suffix
@staticmethod
def _extract_not_none_properties(d):
"""
        Filters out properties whose value is None and returns the rest.
"""
return {k: v for k, v in d if v is not None}
def _construct_run_config(self):
"""
If the user specifies any of Tracing, MemorySize, Timeout or Environment then the RunConfig resource in the
transformed AWS::Synthetics::Canary needs to be added. Note, for Environment property the syntax in
AWS::Serverless::Canary is
Environment:
Variables:
Var1: Var2
while in AWS::Synthetics::Canary its
EnvironmentVariables:
Var1: Var2
so it needs to be transformed accordingly
"""
runconfig = {
"ActiveTracing": self.ActiveTracing,
"MemoryInMB": self.MemorySize,
"TimeoutInSeconds": self.Timeout,
}
if self.Environment:
runconfig["EnvironmentVariables"] = self.Environment["Variables"]
return self._extract_not_none_properties(runconfig.items())
@property
def _construct_code_dict(self):
"""Constructs Synthetics Canary Code Dictionary based on the accepted SAM artifact properties such
as `InlineCode` and `CodeUri`
:raises InvalidResourceException when conditions on the SAM artifact properties are not met.
"""
# accepted artifacts
artifacts = {"InlineCode": self.InlineCode, "CodeUri": self.CodeUri}
filtered_artifacts = self._extract_not_none_properties(artifacts.items())
filtered_artifact_keys = list(filtered_artifacts.keys())
# Note: To emulate the same behavior as SAM Function, if user includes both InlineCode and CodeUri,
# InlineCode will take priority
if "InlineCode" in filtered_artifact_keys:
# Inline function for transformation of inline code.
return {"Handler": self.Handler, "Script": self.InlineCode}
elif "CodeUri" in filtered_artifact_keys:
# extracts Bucket and Key values, adds Handler and extracted values to Code object
code = construct_s3_location_object(self.CodeUri, self.logical_id, "CodeUri")
code["Handler"] = self.Handler
return code
else:
raise InvalidResourceException(self.logical_id, "Either 'InlineCode' or 'CodeUri' must be set.")
class SamApi(SamResourceMacro):
"""SAM rest API macro."""
resource_type = "AWS::Serverless::Api"
property_types = {
# Internal property set only by Implicit API plugin. If set to True, the API Event Source code will inject
# Lambda Integration URI to the Swagger. To preserve backwards compatibility, this must be set only for
# Implicit APIs. For Explicit APIs, customer is expected to set integration URI themselves.
# In the future, we might rename and expose this property to customers so they can have SAM manage Explicit APIs
# Swagger.
"__MANAGE_SWAGGER": PropertyType(False, is_type(bool)),
"Name": PropertyType(False, one_of(is_str(), is_type(dict))),
"StageName": PropertyType(True, one_of(is_str(), is_type(dict))),
"Tags": PropertyType(False, is_type(dict)),
"DefinitionBody": PropertyType(False, is_type(dict)),
"DefinitionUri": PropertyType(False, one_of(is_str(), is_type(dict))),
"CacheClusterEnabled": PropertyType(False, is_type(bool)),
"CacheClusterSize": PropertyType(False, is_str()),
"Variables": PropertyType(False, is_type(dict)),
"EndpointConfiguration": PropertyType(False, one_of(is_str(), is_type(dict))),
"MethodSettings": PropertyType(False, is_type(list)),
"BinaryMediaTypes": PropertyType(False, is_type(list)),
"MinimumCompressionSize": PropertyType(False, is_type(int)),
"Cors": PropertyType(False, one_of(is_str(), is_type(dict))),
"Auth": PropertyType(False, is_type(dict)),
"GatewayResponses": PropertyType(False, is_type(dict)),
"AccessLogSetting": PropertyType(False, is_type(dict)),
"CanarySetting": PropertyType(False, is_type(dict)),
"TracingEnabled": PropertyType(False, is_type(bool)),
"OpenApiVersion": PropertyType(False, is_str()),
"Models": PropertyType(False, is_type(dict)),
"Domain": PropertyType(False, is_type(dict)),
"Description": PropertyType(False, is_str()),
"Mode": PropertyType(False, is_str()),
}
referable_properties = {
"Stage": ApiGatewayStage.resource_type,
"Deployment": ApiGatewayDeployment.resource_type,
"DomainName": ApiGatewayDomainName.resource_type,
"UsagePlan": ApiGatewayUsagePlan.resource_type,
"UsagePlanKey": ApiGatewayUsagePlanKey.resource_type,
"ApiKey": ApiGatewayApiKey.resource_type,
}
def to_cloudformation(self, **kwargs):
"""Returns the API Gateway RestApi, Deployment, and Stage to which this SAM Api corresponds.
:param dict kwargs: already-converted resources that may need to be modified when converting this \
macro to pure CloudFormation
:returns: a list of vanilla CloudFormation Resources, to which this Function expands
:rtype: list
"""
resources = []
intrinsics_resolver = kwargs["intrinsics_resolver"]
self.BinaryMediaTypes = intrinsics_resolver.resolve_parameter_refs(self.BinaryMediaTypes)
self.Domain = intrinsics_resolver.resolve_parameter_refs(self.Domain)
self.Auth = intrinsics_resolver.resolve_parameter_refs(self.Auth)
redeploy_restapi_parameters = kwargs.get("redeploy_restapi_parameters")
shared_api_usage_plan = kwargs.get("shared_api_usage_plan")
template_conditions = kwargs.get("conditions")
api_generator = ApiGenerator(
self.logical_id,
self.CacheClusterEnabled,
self.CacheClusterSize,
self.Variables,
self.depends_on,
self.DefinitionBody,
self.DefinitionUri,
self.Name,
self.StageName,
shared_api_usage_plan,
template_conditions,
tags=self.Tags,
endpoint_configuration=self.EndpointConfiguration,
method_settings=self.MethodSettings,
binary_media=self.BinaryMediaTypes,
minimum_compression_size=self.MinimumCompressionSize,
cors=self.Cors,
auth=self.Auth,
gateway_responses=self.GatewayResponses,
access_log_setting=self.AccessLogSetting,
canary_setting=self.CanarySetting,
tracing_enabled=self.TracingEnabled,
resource_attributes=self.resource_attributes,
passthrough_resource_attributes=self.get_passthrough_resource_attributes(),
open_api_version=self.OpenApiVersion,
models=self.Models,
domain=self.Domain,
description=self.Description,
mode=self.Mode,
)
(
rest_api,
deployment,
stage,
permissions,
domain,
basepath_mapping,
route53,
usage_plan_resources,
) = api_generator.to_cloudformation(redeploy_restapi_parameters)
resources.extend([rest_api, deployment, stage])
resources.extend(permissions)
if domain:
resources.extend([domain])
if basepath_mapping:
resources.extend(basepath_mapping)
if route53:
resources.extend([route53])
# contains usage plan, api key and usageplan key resources
if usage_plan_resources:
resources.extend(usage_plan_resources)
return resources
class SamHttpApi(SamResourceMacro):
"""SAM rest API macro."""
resource_type = "AWS::Serverless::HttpApi"
property_types = {
# Internal property set only by Implicit HTTP API plugin. If set to True, the API Event Source code will
# inject Lambda Integration URI to the OpenAPI. To preserve backwards compatibility, this must be set only for
# Implicit APIs. For Explicit APIs, this is managed by the DefaultDefinitionBody Plugin.
# In the future, we might rename and expose this property to customers so they can have SAM manage Explicit APIs
# Swagger.
"__MANAGE_SWAGGER": PropertyType(False, is_type(bool)),
"StageName": PropertyType(False, one_of(is_str(), is_type(dict))),
"Tags": PropertyType(False, is_type(dict)),
"DefinitionBody": PropertyType(False, is_type(dict)),
"DefinitionUri": PropertyType(False, one_of(is_str(), is_type(dict))),
"StageVariables": PropertyType(False, is_type(dict)),
"CorsConfiguration": PropertyType(False, one_of(is_type(bool), is_type(dict))),
"AccessLogSettings": PropertyType(False, is_type(dict)),
"DefaultRouteSettings": PropertyType(False, is_type(dict)),
"Auth": PropertyType(False, is_type(dict)),
"RouteSettings": PropertyType(False, is_type(dict)),
"Domain": PropertyType(False, is_type(dict)),
"FailOnWarnings": PropertyType(False, is_type(bool)),
"Description": PropertyType(False, is_str()),
"DisableExecuteApiEndpoint": PropertyType(False, is_type(bool)),
}
referable_properties = {
"Stage": ApiGatewayV2Stage.resource_type,
"DomainName": ApiGatewayV2DomainName.resource_type,
}
def to_cloudformation(self, **kwargs):
"""Returns the API GatewayV2 Api, Deployment, and Stage to which this SAM Api corresponds.
:param dict kwargs: already-converted resources that may need to be modified when converting this \
macro to pure CloudFormation
:returns: a list of vanilla CloudFormation Resources, to which this Function expands
:rtype: list
"""
resources = []
intrinsics_resolver = kwargs["intrinsics_resolver"]
self.CorsConfiguration = intrinsics_resolver.resolve_parameter_refs(self.CorsConfiguration)
intrinsics_resolver = kwargs["intrinsics_resolver"]
self.Domain = intrinsics_resolver.resolve_parameter_refs(self.Domain)
api_generator = HttpApiGenerator(
self.logical_id,
self.StageVariables,
self.depends_on,
self.DefinitionBody,
self.DefinitionUri,
self.StageName,
tags=self.Tags,
auth=self.Auth,
cors_configuration=self.CorsConfiguration,
access_log_settings=self.AccessLogSettings,
route_settings=self.RouteSettings,
default_route_settings=self.DefaultRouteSettings,
resource_attributes=self.resource_attributes,
passthrough_resource_attributes=self.get_passthrough_resource_attributes(),
domain=self.Domain,
fail_on_warnings=self.FailOnWarnings,
description=self.Description,
disable_execute_api_endpoint=self.DisableExecuteApiEndpoint,
)
(
http_api,
stage,
domain,
basepath_mapping,
route53,
) = api_generator.to_cloudformation()
resources.append(http_api)
if domain:
resources.append(domain)
if basepath_mapping:
resources.extend(basepath_mapping)
if route53:
resources.append(route53)
# Stage is now optional. Only add it if one is created.
if stage:
resources.append(stage)
return resources
class SamSimpleTable(SamResourceMacro):
"""SAM simple table macro."""
resource_type = "AWS::Serverless::SimpleTable"
property_types = {
"PrimaryKey": PropertyType(False, dict_of(is_str(), is_str())),
"ProvisionedThroughput": PropertyType(False, dict_of(is_str(), one_of(is_type(int), is_type(dict)))),
"TableName": PropertyType(False, one_of(is_str(), is_type(dict))),
"Tags": PropertyType(False, is_type(dict)),
"SSESpecification": PropertyType(False, is_type(dict)),
}
attribute_type_conversions = {"String": "S", "Number": "N", "Binary": "B"}
def to_cloudformation(self, **kwargs):
dynamodb_resources = self._construct_dynamodb_table()
return [dynamodb_resources]
def _construct_dynamodb_table(self):
dynamodb_table = DynamoDBTable(self.logical_id, depends_on=self.depends_on, attributes=self.resource_attributes)
if self.PrimaryKey:
if "Name" not in self.PrimaryKey or "Type" not in self.PrimaryKey:
raise InvalidResourceException(
self.logical_id, "'PrimaryKey' is missing required Property 'Name' or 'Type'."
)
primary_key = {
"AttributeName": self.PrimaryKey["Name"],
"AttributeType": self._convert_attribute_type(self.PrimaryKey["Type"]),
}
else:
primary_key = {"AttributeName": "id", "AttributeType": "S"}
dynamodb_table.AttributeDefinitions = [primary_key]
dynamodb_table.KeySchema = [{"AttributeName": primary_key["AttributeName"], "KeyType": "HASH"}]
if self.ProvisionedThroughput:
dynamodb_table.ProvisionedThroughput = self.ProvisionedThroughput
else:
dynamodb_table.BillingMode = "PAY_PER_REQUEST"
if self.SSESpecification:
dynamodb_table.SSESpecification = self.SSESpecification
if self.TableName:
dynamodb_table.TableName = self.TableName
if bool(self.Tags):
dynamodb_table.Tags = get_tag_list(self.Tags)
return dynamodb_table
def _convert_attribute_type(self, attribute_type):
if attribute_type in self.attribute_type_conversions:
return self.attribute_type_conversions[attribute_type]
raise InvalidResourceException(self.logical_id, "Invalid 'Type' \"{actual}\".".format(actual=attribute_type))
class SamApplication(SamResourceMacro):
"""SAM application macro."""
APPLICATION_ID_KEY = "ApplicationId"
SEMANTIC_VERSION_KEY = "SemanticVersion"
resource_type = "AWS::Serverless::Application"
# The plugin will always insert the TemplateUrl parameter
property_types = {
"Location": PropertyType(True, one_of(is_str(), is_type(dict))),
"TemplateUrl": PropertyType(False, is_str()),
"Parameters": PropertyType(False, is_type(dict)),
"NotificationARNs": PropertyType(False, list_of(one_of(is_str(), is_type(dict)))),
"Tags": PropertyType(False, is_type(dict)),
"TimeoutInMinutes": PropertyType(False, is_type(int)),
}
def to_cloudformation(self, **kwargs):
"""Returns the stack with the proper parameters for this application"""
nested_stack = self._construct_nested_stack()
return [nested_stack]
def _construct_nested_stack(self):
"""Constructs a AWS::CloudFormation::Stack resource"""
nested_stack = NestedStack(
self.logical_id, depends_on=self.depends_on, attributes=self.get_passthrough_resource_attributes()
)
nested_stack.Parameters = self.Parameters
nested_stack.NotificationARNs = self.NotificationARNs
application_tags = self._get_application_tags()
nested_stack.Tags = self._construct_tag_list(self.Tags, application_tags)
nested_stack.TimeoutInMinutes = self.TimeoutInMinutes
nested_stack.TemplateURL = self.TemplateUrl if self.TemplateUrl else ""
return nested_stack
def _get_application_tags(self):
"""Adds tags to the stack if this resource is using the serverless app repo"""
application_tags = {}
if isinstance(self.Location, dict):
if self.APPLICATION_ID_KEY in self.Location.keys() and self.Location[self.APPLICATION_ID_KEY] is not None:
application_tags[self._SAR_APP_KEY] = self.Location[self.APPLICATION_ID_KEY]
if (
self.SEMANTIC_VERSION_KEY in self.Location.keys()
and self.Location[self.SEMANTIC_VERSION_KEY] is not None
):
application_tags[self._SAR_SEMVER_KEY] = self.Location[self.SEMANTIC_VERSION_KEY]
return application_tags
class SamLayerVersion(SamResourceMacro):
"""SAM Layer macro"""
resource_type = "AWS::Serverless::LayerVersion"
property_types = {
"LayerName": PropertyType(False, one_of(is_str(), is_type(dict))),
"Description": PropertyType(False, is_str()),
"ContentUri": PropertyType(True, one_of(is_str(), is_type(dict))),
"CompatibleRuntimes": PropertyType(False, list_of(one_of(is_str(), is_type(dict)))),
"LicenseInfo": PropertyType(False, is_str()),
"RetentionPolicy": PropertyType(False, is_str()),
}
RETAIN = "Retain"
DELETE = "Delete"
retention_policy_options = [RETAIN.lower(), DELETE.lower()]
def to_cloudformation(self, **kwargs):
"""Returns the Lambda layer to which this SAM Layer corresponds.
:param dict kwargs: already-converted resources that may need to be modified when converting this \
macro to pure CloudFormation
:returns: a list of vanilla CloudFormation Resources, to which this Function expands
:rtype: list
"""
resources = []
# Append any CFN resources:
intrinsics_resolver = kwargs["intrinsics_resolver"]
resources.append(self._construct_lambda_layer(intrinsics_resolver))
return resources
def _construct_lambda_layer(self, intrinsics_resolver):
"""Constructs and returns the Lambda function.
:returns: a list containing the Lambda function and execution role resources
:rtype: list
"""
# Resolve intrinsics if applicable:
self.LayerName = self._resolve_string_parameter(intrinsics_resolver, self.LayerName, "LayerName")
self.LicenseInfo = self._resolve_string_parameter(intrinsics_resolver, self.LicenseInfo, "LicenseInfo")
self.Description = self._resolve_string_parameter(intrinsics_resolver, self.Description, "Description")
self.RetentionPolicy = self._resolve_string_parameter(
intrinsics_resolver, self.RetentionPolicy, "RetentionPolicy"
)
# If nothing defined, this will be set to Retain
retention_policy_value = self._get_retention_policy_value()
attributes = self.get_passthrough_resource_attributes()
if attributes is None:
attributes = {}
if "DeletionPolicy" not in attributes:
attributes["DeletionPolicy"] = self.RETAIN
if retention_policy_value is not None:
attributes["DeletionPolicy"] = retention_policy_value
old_logical_id = self.logical_id
# This is to prevent the passthrough resource attributes to be included for hashing
hash_dict = copy.deepcopy(self.to_dict())
if "DeletionPolicy" in hash_dict.get(old_logical_id):
del hash_dict[old_logical_id]["DeletionPolicy"]
if "UpdateReplacePolicy" in hash_dict.get(old_logical_id):
del hash_dict[old_logical_id]["UpdateReplacePolicy"]
if "Metadata" in hash_dict.get(old_logical_id):
del hash_dict[old_logical_id]["Metadata"]
new_logical_id = logical_id_generator.LogicalIdGenerator(old_logical_id, hash_dict).gen()
self.logical_id = new_logical_id
lambda_layer = LambdaLayerVersion(self.logical_id, depends_on=self.depends_on, attributes=attributes)
# Changing the LayerName property: when a layer is published, it is given an Arn
# example: arn:aws:lambda:us-west-2:123456789012:layer:MyLayer:1
# where MyLayer is the LayerName property if it exists; otherwise, it is the
# LogicalId of this resource. Since a LayerVersion is an immutable resource, when
# CloudFormation updates this resource, it will ALWAYS create a new version then
# delete the old version if the logical ids match. What this does is change the
# logical id of every layer (so a `DeletionPolicy: Retain` can work) and set the
# LayerName property of the layer so that the Arn will still always be the same
# with the exception of an incrementing version number.
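        # Illustrative effect (names assumed): a layer with logical id "MyLayer" is emitted as something like
        # "MyLayerabc123def4" with LayerName "MyLayer", so the published ARN keeps the form
        # arn:aws:lambda:<region>:<account>:layer:MyLayer:<version> while old versions are retained.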
if not self.LayerName:
self.LayerName = old_logical_id
lambda_layer.LayerName = self.LayerName
lambda_layer.Description = self.Description
lambda_layer.Content = construct_s3_location_object(self.ContentUri, self.logical_id, "ContentUri")
lambda_layer.CompatibleRuntimes = self.CompatibleRuntimes
lambda_layer.LicenseInfo = self.LicenseInfo
return lambda_layer
def _get_retention_policy_value(self):
"""
Sets the deletion policy on this resource. The default is 'Retain'.
:return: value for the DeletionPolicy attribute.
"""
if self.RetentionPolicy is None:
return None
elif self.RetentionPolicy.lower() == self.RETAIN.lower():
return self.RETAIN
elif self.RetentionPolicy.lower() == self.DELETE.lower():
return self.DELETE
elif self.RetentionPolicy.lower() not in self.retention_policy_options:
raise InvalidResourceException(
self.logical_id,
"'{}' must be one of the following options: {}.".format("RetentionPolicy", [self.RETAIN, self.DELETE]),
)
class SamStateMachine(SamResourceMacro):
"""SAM state machine macro."""
resource_type = "AWS::Serverless::StateMachine"
property_types = {
"Definition": PropertyType(False, is_type(dict)),
"DefinitionUri": PropertyType(False, one_of(is_str(), is_type(dict))),
"Logging": PropertyType(False, is_type(dict)),
"Role": PropertyType(False, is_str()),
"DefinitionSubstitutions": PropertyType(False, is_type(dict)),
"Events": PropertyType(False, dict_of(is_str(), is_type(dict))),
"Name": PropertyType(False, is_str()),
"Type": PropertyType(False, is_str()),
"Tags": PropertyType(False, is_type(dict)),
"Policies": PropertyType(False, one_of(is_str(), list_of(one_of(is_str(), is_type(dict), is_type(dict))))),
"Tracing": PropertyType(False, is_type(dict)),
"PermissionsBoundary": PropertyType(False, is_str()),
}
event_resolver = ResourceTypeResolver(
samtranslator.model.stepfunctions.events,
)
def to_cloudformation(self, **kwargs):
managed_policy_map = kwargs.get("managed_policy_map", {})
intrinsics_resolver = kwargs["intrinsics_resolver"]
event_resources = kwargs["event_resources"]
state_machine_generator = StateMachineGenerator(
logical_id=self.logical_id,
depends_on=self.depends_on,
managed_policy_map=managed_policy_map,
intrinsics_resolver=intrinsics_resolver,
definition=self.Definition,
definition_uri=self.DefinitionUri,
logging=self.Logging,
name=self.Name,
policies=self.Policies,
permissions_boundary=self.PermissionsBoundary,
definition_substitutions=self.DefinitionSubstitutions,
role=self.Role,
state_machine_type=self.Type,
tracing=self.Tracing,
events=self.Events,
event_resources=event_resources,
event_resolver=self.event_resolver,
tags=self.Tags,
resource_attributes=self.resource_attributes,
passthrough_resource_attributes=self.get_passthrough_resource_attributes(),
)
resources = state_machine_generator.to_cloudformation()
return resources
def resources_to_link(self, resources):
try:
return {"event_resources": self._event_resources_to_link(resources)}
except InvalidEventException as e:
raise InvalidResourceException(self.logical_id, e.message)
def _event_resources_to_link(self, resources):
event_resources = {}
if self.Events:
for logical_id, event_dict in self.Events.items():
try:
event_source = self.event_resolver.resolve_resource_type(event_dict).from_dict(
self.logical_id + logical_id, event_dict, logical_id
)
except (TypeError, AttributeError) as e:
raise InvalidEventException(logical_id, "{}".format(e))
event_resources[logical_id] = event_source.resources_to_link(resources)
return event_resources
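# Illustrative sketch (not part of the upstream samtranslator source): the shape of the
# Events mapping that _event_resources_to_link iterates over. Keys are event logical ids
# and values are dicts carrying a "Type" plus type-specific properties; the "Schedule"
# type and its properties below are assumptions chosen only for illustration.
_example_state_machine_events = {
    "DailyTrigger": {
        "Type": "Schedule",
        "Properties": {"Schedule": "rate(1 day)"},
    },
}
# For each entry, event_resolver maps the "Type" to an event source class, and the linked
# resources end up keyed by the event's logical id ("DailyTrigger" in this sketch).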
| 47.333922
| 205
| 0.674617
| 76,939
| 0.95725
| 0
| 0
| 2,252
| 0.028019
| 0
| 0
| 26,146
| 0.3253
|
dd7bb92f20393f9e12f7115e84f70223584f154d
| 928
|
py
|
Python
|
media_logs/urls/audios.py
|
Akshayvm98/Django-School
|
723d52db2cd3bc7665680a3adaf8687f97836d48
|
[
"MIT"
] | 26
|
2015-08-04T00:13:27.000Z
|
2021-03-19T01:01:14.000Z
|
media_logs/urls/audios.py
|
Akshayvm98/Django-School
|
723d52db2cd3bc7665680a3adaf8687f97836d48
|
[
"MIT"
] | null | null | null |
media_logs/urls/audios.py
|
Akshayvm98/Django-School
|
723d52db2cd3bc7665680a3adaf8687f97836d48
|
[
"MIT"
] | 28
|
2015-01-19T15:10:15.000Z
|
2020-10-27T11:22:21.000Z
|
from django.conf.urls.defaults import *
from media_logs.models import *
audio_list = {
'queryset': Audio.objects.all(),
}
audio_set_list = {
'queryset': AudioSet.objects.all(),
}
urlpatterns = patterns('',
url(
regex = '^sets/(?P<slug>[-\w]+)/$',
view = 'django.views.generic.list_detail.object_detail',
kwargs = audio_set_list,
name = 'audio_set_detail',
),
url (
regex = '^sets/$',
view = 'django.views.generic.list_detail.object_list',
kwargs = audio_set_list,
name = 'audio_set_list',
),
url(
regex = '^(?P<user>[-\w]+)/(?P<slug>[-\w]+)/$',
#regex = '^(?P<slug>[-\w]+)/$',
view = 'views.generic.list_detail.object_detail',
kwargs = audio_list,
name = 'audio_detail',
),
url (
regex = '^$',
view = 'django.views.generic.list_detail.object_list',
kwargs = audio_list,
name = 'audio_list',
),
)
| 23.2
| 63
| 0.574353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 373
| 0.40194
|
dd7d61b4fcf318d454a05f755e0919c0dd18ea88
| 2,964
|
py
|
Python
|
pycalc/MAVProxy/modules/mavproxy_gopro.py
|
joakimzhang/python-electron
|
79bc174a14c5286ca739bb7d8ce6522fdc6e9e80
|
[
"CC0-1.0"
] | null | null | null |
pycalc/MAVProxy/modules/mavproxy_gopro.py
|
joakimzhang/python-electron
|
79bc174a14c5286ca739bb7d8ce6522fdc6e9e80
|
[
"CC0-1.0"
] | 8
|
2021-01-28T19:26:22.000Z
|
2022-03-24T18:07:24.000Z
|
pycalc/MAVProxy/modules/mavproxy_gopro.py
|
joakimzhang/python-electron
|
79bc174a14c5286ca739bb7d8ce6522fdc6e9e80
|
[
"CC0-1.0"
] | null | null | null |
#!/usr/bin/env python
'''gopro control over mavlink for the solo-gimbal
To use this module connect to a Solo with a GoPro installed on the gimbal.
'''
import time, os
from MAVProxy.modules.lib import mp_module
from pymavlink import mavutil
class GoProModule(mp_module.MPModule):
def __init__(self, mpstate):
super(GoProModule, self).__init__(mpstate, "gopro", "gopro handling")
self.add_command('gopro', self.cmd_gopro, 'gopro control', [
'status',
'shutter <start|stop>',
'mode <video|camera>',
'power <on|off>'])
def cmd_gopro(self, args):
'''gopro commands'''
usage = "status, shutter <start|stop>, mode <video|camera>, power <on|off>"
mav = self.master.mav
if args[0] == "status":
self.cmd_gopro_status(args[1:])
return
if args[0] == "shutter":
name = args[1].lower()
if name == 'start':
mav.gopro_set_request_send(0, mavutil.mavlink.MAV_COMP_ID_GIMBAL,
mavutil.mavlink.GOPRO_COMMAND_SHUTTER, 1)
return
elif name == 'stop':
mav.gopro_set_request_send(0, mavutil.mavlink.MAV_COMP_ID_GIMBAL,
mavutil.mavlink.GOPRO_COMMAND_SHUTTER, 0)
return
else:
print("unrecognized")
return
if args[0] == "mode":
name = args[1].lower()
if name == 'video':
mav.gopro_set_request_send(0, mavutil.mavlink.MAV_COMP_ID_GIMBAL,
mavutil.mavlink.GOPRO_COMMAND_CAPTURE_MODE, 0)
return
elif name == 'camera':
mav.gopro_set_request_send(0, mavutil.mavlink.MAV_COMP_ID_GIMBAL,
mavutil.mavlink.GOPRO_COMMAND_CAPTURE_MODE, 1)
return
else:
print("unrecognized")
return
if args[0] == "power":
name = args[1].lower()
if name == 'on':
mav.gopro_set_request_send(0, mavutil.mavlink.MAV_COMP_ID_GIMBAL,
mavutil.mavlink.GOPRO_COMMAND_POWER, 1)
return
elif name == 'off':
mav.gopro_set_request_send(0, mavutil.mavlink.MAV_COMP_ID_GIMBAL,
mavutil.mavlink.GOPRO_COMMAND_POWER, 0)
return
else:
print("unrecognized")
return
print(usage)
def cmd_gopro_status(self, args):
'''show gopro status'''
master = self.master
if 'GOPRO_HEARTBEAT' in master.messages:
print(master.messages['GOPRO_HEARTBEAT'])
else:
print("No GOPRO_HEARTBEAT messages")
def init(mpstate):
'''initialise module'''
return GoProModule(mpstate)
| 34.068966
| 83
| 0.5361
| 2,639
| 0.890351
| 0
| 0
| 0
| 0
| 0
| 0
| 567
| 0.191296
|
dd7ea8658ced22af8cde597e98ea9827577d3995
| 343
|
py
|
Python
|
strings.py
|
aemreb/telegram_paybot
|
f973ddd9029f2844901508fa983aa37b6ca93089
|
[
"MIT"
] | 2
|
2021-09-17T10:55:14.000Z
|
2021-09-17T10:55:38.000Z
|
strings.py
|
aemreb/telegram_paybot
|
f973ddd9029f2844901508fa983aa37b6ca93089
|
[
"MIT"
] | null | null | null |
strings.py
|
aemreb/telegram_paybot
|
f973ddd9029f2844901508fa983aa37b6ca93089
|
[
"MIT"
] | 1
|
2021-05-31T18:23:02.000Z
|
2021-05-31T18:23:02.000Z
|
help = '''Hey 👋 \n\n /signup nick: enter your nick and sign up \n\n /atm: see your balance \n\n /send nick amount: send this nick that amount of buxx 💰 \n '''
signup = '''Hi! Type /signup to sign up.'''
user_created = '''Created user. Welcome to Paybot 🤑'''
user_exists = '''User already exists ☹️'''
not_enough_buxx = '''Not enough Buxx 🙄'''
| 57.166667
| 158
| 0.655977
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 291
| 0.810585
|
dd7f9dbcfe5bd13ce56beb5ae807d4bb63f3c4df
| 1,609
|
py
|
Python
|
Program_python/Extractfolderimage.py
|
pection/MN-furniture
|
4c796f072662c15b2a263272ef2637e221c42cab
|
[
"MIT"
] | 1
|
2022-02-22T06:20:56.000Z
|
2022-02-22T06:20:56.000Z
|
Program_python/Extractfolderimage.py
|
pection/MN-furniture
|
4c796f072662c15b2a263272ef2637e221c42cab
|
[
"MIT"
] | null | null | null |
Program_python/Extractfolderimage.py
|
pection/MN-furniture
|
4c796f072662c15b2a263272ef2637e221c42cab
|
[
"MIT"
] | 1
|
2020-11-24T18:18:42.000Z
|
2020-11-24T18:18:42.000Z
|
import os
import sys
import numpy as np
from PIL import Image
num=1
path ="/Users/pection/Documents/mn_furniture/AddwatermarkProgram/Lastday/"
#we shall store all the file names in this list
filelist=[]
for root, dirs, files in os.walk(path):
for file in files:
if(file.endswith(".jpg")):
filelist.append(os.path.join(root,file))
print (filelist)
logo=Image.open('logo.png')
logo2=Image.open('logo2.png')
watermark = Image.open('WatermarkB5.png')
watermark2 = Image.open('WatermarkB3.png')
logoWidth = watermark.width
logoHeight = watermark.height
watermarkW=watermark.width
watermarkH=watermark.height
logo2Width = watermark2.width
logo2Height = watermark2.height
for filename in filelist:
image = Image.open(filename)
# imageWidth = image.width
# imageHeight = image.height
# if imageWidth<500 :
# img_w, img_h = image.size
# bg_w, bg_h = watermark2.size
# offset = ((bg_w - img_w) // 2, (bg_h - img_h) // 2)
# image.paste(logo2, (0, 0), logo2)
# image2=image.copy()
# image2.paste(watermark2,(int((img_w-logo2Width)/2),int((img_h-logo2Height)/2)),watermark2)
# else :
# img_w, img_h = image.size
# bg_w, bg_h = watermark.size
# offset = ((bg_w - img_w) // 2, (bg_h - img_h) // 2)
# image.paste(logo, (0, 0), logo)
# image2=image.copy()
# image2.paste(watermark,(int((img_w-logoWidth)/2),int((img_h-logoHeight)/2)),watermark)
num += 1
# image.save(filename)
image.save('/Users/pection/Documents/mn_furniture/AddwatermarkProgram/Extract/'+str(num)+'.png')
| 35.755556
| 100
| 0.661902
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 912
| 0.566812
|
dd7fb45e0f3cff64598edf9ddf119adc6b039b8e
| 1,986
|
py
|
Python
|
BrainML/__init__.py
|
bogdan124/DeepML
|
ad5e904cc9fcd3c499bbca3538525d83fde003f5
|
[
"Apache-2.0"
] | null | null | null |
BrainML/__init__.py
|
bogdan124/DeepML
|
ad5e904cc9fcd3c499bbca3538525d83fde003f5
|
[
"Apache-2.0"
] | null | null | null |
BrainML/__init__.py
|
bogdan124/DeepML
|
ad5e904cc9fcd3c499bbca3538525d83fde003f5
|
[
"Apache-2.0"
] | null | null | null |
import tensorflow as tf
from BrainML.activation import Activator
from BrainML.layers import *
from BrainML.optimizer import Optimizer
from tensorflow.python.util import deprecation
##deprecation._PRINT_DEPRECATION_WARNINGS = False
##tf.compat.v1.disable_eager_execution()
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
class Network:
def __init__(self,layers=None, name=None):
self.model=None
self.output=None
self.layers=layers
self.compile=None
self.name=name
newLayers=[]
##if layers[0].shape!=None:
## newLayers.append(tf.keras.Input(input_shape=layers[0].shape))
for i in range(0,len(layers)):
newLayers.append(self.layers[i].layer)
##newLayers[i].value_to_feed=
self.model=tf.keras.Sequential()##newLayers, name
for i in newLayers:
self.model.add(i)
def train(self,x=None, y=None, batch_size=None, epochs=1, verbose=1, callbacks=None,
validation_split=0.0, validation_data=None, shuffle=True, class_weight=None,
sample_weight=None, initial_epoch=0, steps_per_epoch=None,
validation_steps=None, validation_batch_size=None, validation_freq=1,
max_queue_size=10, workers=1, use_multiprocessing=False,optimizer='rmsprop',
loss=None, metrics=None, loss_weights=None,weighted_metrics=None, run_eagerly=None):
if loss==None:
loss="mse"
elif metrics==None or metrics[0]=="all":
metrics=["mae", "acc"]
else:
optimizer="rmsprop"
self.compile=self.model.compile(optimizer, loss, metrics, loss_weights,weighted_metrics, run_eagerly)##initial_epoch,steps_per_epoch
self.output=self.model.fit(x, y, batch_size, epochs, verbose, callbacks,validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing)
return self.output
def Summary(self):
self.model.summary()
## if __name__ == "__main__":
## pass
| 38.192308
| 268
| 0.735146
| 1,617
| 0.814199
| 0
| 0
| 0
| 0
| 0
| 0
| 360
| 0.181269
|
dd80e90be1c610d2c46bc8b8b02fd6070d94ee6d
| 2,900
|
py
|
Python
|
detection/pixel_link/util/dec.py
|
HLIG/HUAWEI_OCR2019
|
1070d6291072e0223c2624f686766d0f3065e9c6
|
[
"MIT"
] | 211
|
2020-04-13T02:56:57.000Z
|
2022-03-30T11:22:46.000Z
|
detection/pixel_link/util/dec.py
|
HLIG/HUAWEI_OCR2019
|
1070d6291072e0223c2624f686766d0f3065e9c6
|
[
"MIT"
] | 41
|
2020-04-14T20:08:58.000Z
|
2022-03-28T15:08:30.000Z
|
detection/pixel_link/util/dec.py
|
HLIG/HUAWEI_OCR2019
|
1070d6291072e0223c2624f686766d0f3065e9c6
|
[
"MIT"
] | 47
|
2020-04-13T10:17:11.000Z
|
2022-02-21T02:06:16.000Z
|
#encoding=utf-8
import logging
import time
def print_calling(fn):
def wrapper(*args1, ** args2):
s = "calling function %s"%(fn.__name__)
logging.info(s)
start = time.time()
ret = fn(*args1, **args2)
end = time.time()
# s = "%s. time used = %f seconds"%(s, (end - start))
s = "function [%s] has been called, taking %f seconds"%(fn.__name__, (end - start))
logging.debug(s)
return ret
return wrapper
def print_test(fn):
def wrapper(*args1, ** args2):
s = "running test: %s..."%(fn.__name__)
logging.info(s)
ret = fn(*args1, **args2)
s = "running test: %s...succeed"%(fn.__name__)
logging.debug(s)
return ret
return wrapper
def print_calling_in_short(fn):
def wrapper(*args1, ** args2):
start = time.time()
ret = fn(*args1, **args2)
end = time.time()
s = "function [%s] has been called, taking %f seconds"%(fn.__name__, (end - start))
logging.debug(s)
return ret
return wrapper
import collections
counter = collections.defaultdict(int)
count_times =collections.defaultdict(int)
def print_calling_in_short_for_tf(fn):
import tensorflow as tf
import util
def wrapper(*args1, ** args2):
start = time.time()
thread_name = util.thread.get_current_thread_name()
ret = fn(*args1, **args2)
end = time.time()
counter[fn.__name__] = counter[fn.__name__] + (end - start)
count_times[fn.__name__] += 1
all_time = sum([counter[name] for name in counter]) * 1.0
for name in counter:
# tf.logging.info('\t %s: %f, %f seconds'%(name, counter[name] / all_time, counter[name]))
            tf.logging.info('\t %s: %d callings, %fs per calling'%(name, count_times[name], counter[name] * 1.0 / count_times[name]))
s = "Thread [%s]:function [%s] has been called, taking %f seconds"%(thread_name, fn.__name__, (end - start))
tf.logging.info(s)
return ret
return wrapper
def timeit(fn):
import util
def wrapper(*args1, ** args2):
start = time.time()
thread_name = util.thread.get_current_thread_name()
ret = fn(*args1, **args2)
end = time.time()
counter[fn.__name__] = counter[fn.__name__] + (end - start)
count_times[fn.__name__] += 1
all_time = sum([counter[name] for name in counter]) * 1.0
for name in counter:
logging.info('\t %s: %f, %f seconds'%(name, counter[name] / all_time, counter[name]))
logging.info('\t %s: %d callings, %f seconds per calling'%(name, count_times[name], counter[name] * 1.0 / count_times[name]))
s = "Thread [%s]:function [%s] has been called, taking %f seconds"%(thread_name, fn.__name__, (end - start))
# logging.info(s)
return ret
return wrapper
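# Illustrative usage sketch (not part of the original module): applying one of the timing
# decorators defined above to a made-up function.
@print_calling
def example_task(n):
    return sum(range(n))
# Calling example_task(1000) logs "calling function example_task" at info level and then
# the elapsed time at debug level, provided logging is configured to emit those levels.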
| 36.708861
| 137
| 0.589655
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 600
| 0.206897
|
dd8112f277127d953f0d1ded63b446df7043645e
| 2,955
|
py
|
Python
|
problems/graph-valid-tree.py
|
sailikhithk/tech-interview-prep
|
e833764cf98915d56118bddfa0e01871c58de75e
|
[
"Apache-2.0"
] | null | null | null |
problems/graph-valid-tree.py
|
sailikhithk/tech-interview-prep
|
e833764cf98915d56118bddfa0e01871c58de75e
|
[
"Apache-2.0"
] | null | null | null |
problems/graph-valid-tree.py
|
sailikhithk/tech-interview-prep
|
e833764cf98915d56118bddfa0e01871c58de75e
|
[
"Apache-2.0"
] | null | null | null |
"""
First of all, a tree of N nodes must have exactly N-1 edges.
2 nodes need 1 edge to connect, 3 nodes need 2 edges, and so on.
Just draw one or two and you will see it.
A valid tree has no cycles, and there are two ways to detect them:
DFS and union find. Union find is more suitable in this situation.
1. Union find.
We use an array 'markset' to store the root node of each node. [0]
So markset[1]==3 means node1's root is node3,
and markset[6]==4 means node6's root is node4.
We use find() to find a node's root. [1]
For example, suppose node1's root is node3.
In the recursion we find out that node3's root is node5,
so we return and set node5 as node1's real root.
If a node has no root, then the root is the node itself.
Imagine an edge 1<->6. [2]
union()'s mission is to find out whether node1 and node6 already share the same root before we know the edge 1<->6 exists.
If node1 and node6 share the same root before we know the edge 1<->6,
there must be a cycle between node1, node6 and their root.
A special situation is
1<->2, 3<->4, 3<->5 (we have two trees that are not connected).
Node1 and node3 will both have -1 as 'root', which means they are not connected.
But a valid tree should be connected and have one and only one root.
The time complexity is O(NlogN), because we run a loop over every edge,
and the number of edges is equal to N-1.
For every edge we use find() to find the root of the two nodes.
The recursion takes the height of the tree, which is logN.
N is the number of nodes.
Space complexity is O(N).
2. DFS
We use DFS to find out whether there are cycles in the tree.
If we visit a node that we have already visited, there is a cycle.
Since this is an undirected graph, we have to add both directions to the adjacency list,
and every time we use an edge, we need to remove its counterpart to avoid repeats.
Finally, we have to check that we visited all the nodes, because there may be unconnected nodes.
The time complexity is O(N+E), because this is a DFS over an adjacency list.
Space complexity is O(N).
"""
class Solution(object):
def validTree(self, n, edges):
def union(n1, n2): #[2]
n1_root = find(n1)
n2_root = find(n2)
if n1_root==n2_root: return True
markset[n2_root] = n1_root
return False
def find(node): #[1]
if markset[node]==-1: return node
return find(markset[node])
if len(edges)!=n-1: return False
markset = [-1 for _ in xrange(n)] #[0]
for edge in edges:
if union(edge[0], edge[1]): return False
return True
def validTree(self, n, edges):
if n!=len(edges)+1: return False
graph = collections.defaultdict(list)
stack = []
visited = set()
for edge in edges:
graph[edge[0]].append(edge[1])
graph[edge[1]].append(edge[0])
if len(edges)>0:
stack.append(edges[0][0])
while stack:
node = stack.pop()
if node in visited: return False
visited.add(node)
for nb in graph[node]:
stack.append(nb)
graph[nb].remove(node)
if len(visited)!=n: return False
return True
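# Illustrative usage sketch (not part of the original solution). Both implementations
# answer whether the edges form a single connected, acyclic graph over n nodes, e.g.:
#   Solution().validTree(5, [[0, 1], [0, 2], [0, 3], [1, 4]])         -> True  (a tree)
#   Solution().validTree(5, [[0, 1], [1, 2], [2, 3], [1, 3], [1, 4]]) -> False (extra edge forms a cycle)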
| 31.105263
| 99
| 0.702876
| 456
| 0.154315
| 0
| 0
| 0
| 0
| 0
| 0
| 1,998
| 0.676142
|
dd81524e1e000d2bbdd8e39c55a281ea1c78ab94
| 1,336
|
py
|
Python
|
config.py
|
MGorr/icons_updater
|
aa9f9177a565fbe590cf959f625f049024e01efb
|
[
"MIT"
] | 1
|
2021-06-18T06:58:15.000Z
|
2021-06-18T06:58:15.000Z
|
config.py
|
MGorr/icons_updater
|
aa9f9177a565fbe590cf959f625f049024e01efb
|
[
"MIT"
] | null | null | null |
config.py
|
MGorr/icons_updater
|
aa9f9177a565fbe590cf959f625f049024e01efb
|
[
"MIT"
] | null | null | null |
"""Configuration class for icons updating."""
import os
from configparser import ConfigParser
_DESTINATION_NAME = 'dst'
_MAGICK_NAME = 'path'
_SOURCES_NAME = 'src'
class Config:
"""Configuration class."""
def __init__(self, config_file=None, src=None, dst=None):
"""Constructor."""
parser = ConfigParser()
if config_file:
parser.read(config_file)
section = parser['settings'] if config_file else None
if config_file and _MAGICK_NAME in section:
os.environ['PATH'] += os.pathsep + \
os.path.abspath(section[_MAGICK_NAME])
if not src and config_file:
src = section[_SOURCES_NAME]
elif not src:
raise RuntimeError('Source folder should be set!')
self._src = os.path.normpath(src)
if not dst and config_file:
dst = section[_DESTINATION_NAME]
elif not dst:
raise RuntimeError('Destination folder should be set!')
self._dst = os.path.normpath(dst)
assert self._dst, 'Destination folder should be set!'
assert self._src, 'Sources folder should be set!'
def destination(self):
"""Destination folder."""
return self._dst
def sources(self):
"""Sources folder."""
return self._src
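# Illustrative usage sketch (not part of the original module). The file name, section
# values and folders below are assumptions, not taken from the project:
#   [settings]
#   path = tools/imagemagick
#   src = icons/src
#   dst = icons/out
# With such a file, Config(config_file="updater.ini").sources() returns the normalized
# "icons/src" path and .destination() returns the normalized "icons/out" path, while the
# "path" entry is appended to the PATH environment variable for the magick binaries.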
| 30.363636
| 72
| 0.610778
| 1,166
| 0.872754
| 0
| 0
| 0
| 0
| 0
| 0
| 298
| 0.223054
|
dd820e8c14626563e752d4624969a2315eb254b9
| 8,831
|
py
|
Python
|
project/settings.py
|
ziibii88/The_Doe_Agency
|
2545aeae71c779166bef78941cac36551498ca76
|
[
"MIT"
] | 2
|
2021-07-16T10:25:15.000Z
|
2021-08-07T04:44:08.000Z
|
project/settings.py
|
ziibii88/The_Doe_Agency
|
2545aeae71c779166bef78941cac36551498ca76
|
[
"MIT"
] | null | null | null |
project/settings.py
|
ziibii88/The_Doe_Agency
|
2545aeae71c779166bef78941cac36551498ca76
|
[
"MIT"
] | null | null | null |
"""
Django settings for project project.
Generated by 'django-admin startproject' using Django 3.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from datetime import timedelta
from pathlib import Path
from decouple import config, Csv
import sentry_sdk
from sentry_sdk.integrations.django import DjangoIntegration
from .logging import CONFIG as LOG_CONFIG
ENVIRON = config("ENVIRON", default="prod")
if ENVIRON == "prod":
sentry_sdk.init(
dsn="https://ad9a9f987fa949a899c3b890ef4cd112"
"@o354850.ingest.sentry.io/5868398",
integrations=[DjangoIntegration()],
environment=ENVIRON,
traces_sample_rate=1.0,
send_default_pii=True,
release="tda@0.1.0", # change in poetry as well
)
LOGGING = LOG_CONFIG
PROJECT_NAME = "The Doe Agency"
PROJECT_SLUG = "the-doe-agency"
PROJECT_CODE = "TDA"
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config("SECRET_KEY")
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = config("DEBUG", default=False, cast=bool)
ALLOWED_HOSTS = config("ALLOWED_HOSTS", default=[], cast=Csv())
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#configuring-internal-ips
INTERNAL_IPS = config("INTERNAL_IPS", default=[], cast=Csv())
# Application definition ---------------------------------------------------- #
INSTALLED_APPS = [
# django apps
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"django.contrib.sites",
"django.contrib.humanize",
# 3rd party apps
"django_filters",
"rest_framework",
"rest_framework_simplejwt",
"django_celery_results",
"django_celery_beat",
# health_check
"health_check",
"health_check.db",
"health_check.cache",
"health_check.storage",
"health_check.contrib.migrations",
# 'health_check.contrib.celery',
# 'health_check.contrib.celery_ping',
"health_check.contrib.psutil",
# 'health_check.contrib.s3boto3_storage',
# 'health_check.contrib.rabbitmq',
# 'health_check.contrib.redis',
# project apps
"core",
"scraper",
]
SITE_ID = 1 # Sites framework
if ENVIRON == "dev":
INSTALLED_APPS.append("debug_toolbar")
MIDDLEWARE = [
"debug_toolbar.middleware.DebugToolbarMiddleware",
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "project.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
AUTH_USER_MODEL = "core.User"
TEST_URL = "http://httpbin.zube.xyz/ip"
WSGI_APPLICATION = "project.wsgi.application"
# Database ------------------------------------------------------------------ #
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": config("DB_ENGINE", default="django.db.backends.postgresql"),
"HOST": config("DB_HOST", default="127.0.0.1"),
"PORT": config("DB_PORT", default=5432, cast=int),
"NAME": config("DB_NAME", default="tda_db"),
"USER": config("DB_USER", default="tda_user"),
"PASSWORD": config("DB_PASS", default="tda_pass"),
}
}
# Password validation ------------------------------------------------------- #
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation"
".UserAttributeSimilarityValidator"
},
{
"NAME": "django.contrib.auth.password_validation"
".MinimumLengthValidator"
},
{
"NAME": "django.contrib.auth.password_validation"
".CommonPasswordValidator"
},
{
"NAME": "django.contrib.auth.password_validation"
".NumericPasswordValidator"
},
]
# Internationalization ------------------------------------------------------ #
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = config("TIME_ZONE", default="UTC")
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images) ------------------------------------ #
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = "/static/"
STATIC_ROOT = (
BASE_DIR / "static"
) # production, don't forget to run collectstatic
STATICFILES_DIRS = [
BASE_DIR / "staticfiles",
] # development environment
MEDIA_URL = "/media/"
MEDIA_ROOT = BASE_DIR / "media"
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = "django.db.models.BigAutoField"
# Django REST Framework -------------------------------------------------------
# https://www.django-rest-framework.org/api-guide/settings/
REST_FRAMEWORK = {
"DEFAULT_AUTHENTICATION_CLASSES": [
# https://django-rest-framework-simplejwt.readthedocs.io/en/latest/getting_started.html#installation
"rest_framework_simplejwt.authentication.JWTAuthentication",
# "rest_framework.authentication.TokenAuthentication",
"rest_framework.authentication.SessionAuthentication",
],
"DEFAULT_PERMISSION_CLASSES": [
"rest_framework.permissions.IsAuthenticated"
],
"DEFAULT_FILTER_BACKENDS": [
# django-filters
# https://www.django-rest-framework.org/api-guide/filtering/
# https://django-filter.readthedocs.io/en/latest/guide/rest_framework.html
"django_filters.rest_framework.DjangoFilterBackend",
# https://www.django-rest-framework.org/api-guide/filtering/#searchfilter
"rest_framework.filters.SearchFilter",
# https://www.django-rest-framework.org/api-guide/filtering/#orderingfilter
"rest_framework.filters.OrderingFilter",
],
"DEFAULT_PAGINATION_CLASS": "rest_framework.pagination"
".PageNumberPagination",
"PAGE_SIZE": 100,
}
# DRF SimpleJWT ---------------------------------------------------------------
# https://django-rest-framework-simplejwt.readthedocs.io/en/latest/settings.html
SIMPLE_JWT = {
"ACCESS_TOKEN_LIFETIME": timedelta(minutes=15),
"REFRESH_TOKEN_LIFETIME": timedelta(days=1),
"ROTATE_REFRESH_TOKENS": False,
"BLACKLIST_AFTER_ROTATION": True,
"UPDATE_LAST_LOGIN": True,
"ALGORITHM": "HS256",
"SIGNING_KEY": SECRET_KEY,
"VERIFYING_KEY": None,
"AUDIENCE": None,
"ISSUER": None,
"AUTH_HEADER_TYPES": ("Bearer",),
"AUTH_HEADER_NAME": "HTTP_AUTHORIZATION",
"USER_ID_FIELD": "id",
"USER_ID_CLAIM": "user_id",
"USER_AUTHENTICATION_RULE": "rest_framework_simplejwt.authentication"
".default_user_authentication_rule",
"AUTH_TOKEN_CLASSES": ("rest_framework_simplejwt.tokens.AccessToken",),
"TOKEN_TYPE_CLAIM": "token_type",
"JTI_CLAIM": "jti",
"SLIDING_TOKEN_REFRESH_EXP_CLAIM": "refresh_exp",
"SLIDING_TOKEN_LIFETIME": timedelta(minutes=15),
"SLIDING_TOKEN_REFRESH_LIFETIME": timedelta(days=1),
}
# Celery ----------------------------------------------------------------------
# https://docs.celeryproject.org/en/stable/userguide/configuration.html#configuration-and-defaults
CELERY_BROKER_URL = config(
"CELERY_BROKER_URL", default="amqp://127.0.0.1:5672/"
)
CELERY_RESULT_BACKEND = config("CELERY_RESULT_BACKEND", default="django-db")
CELERY_CACHE_BACKEND = config("CELERY_CACHE_BACKEND", default="django-cache")
CELERY_TIMEZONE = config("CELERY_TIME_ZONE", default=TIME_ZONE)
CELERY_TASK_TRACK_STARTED = config("CELERY_TASK_TRACK_STARTED", default=True)
CELERY_TASK_TIME_LIMIT = config("CELERY_TASK_TIME_LIMIT", default=30 * 60)
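# Illustrative sketch (not part of the original settings): the values read via
# python-decouple's config() above would typically come from a .env file next to the
# project; the entries below are made-up examples, not the project's real configuration.
#   ENVIRON=dev
#   SECRET_KEY=change-me
#   DEBUG=True
#   ALLOWED_HOSTS=127.0.0.1,localhost
#   DB_NAME=tda_db
#   CELERY_BROKER_URL=amqp://127.0.0.1:5672/
# Csv() casts comma-separated values such as ALLOWED_HOSTS into Python lists.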
| 32.112727
| 108
| 0.669347
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6,005
| 0.679991
|
dd837f67ec7177838bf8a526749af097805f6779
| 15,142
|
py
|
Python
|
CalsCamera/main.py
|
NoDrones/Imaging
|
555c8aeced98097379b80f448689f2bf2974c3e9
|
[
"MIT"
] | 1
|
2019-01-28T21:55:53.000Z
|
2019-01-28T21:55:53.000Z
|
CalsCamera/main.py
|
NoDrones/Imaging
|
555c8aeced98097379b80f448689f2bf2974c3e9
|
[
"MIT"
] | null | null | null |
CalsCamera/main.py
|
NoDrones/Imaging
|
555c8aeced98097379b80f448689f2bf2974c3e9
|
[
"MIT"
] | null | null | null |
#Author: Calvin Ryan
import sensor, image, time, pyb, ustruct, math, utime
def get_gain():
gain_reg_val = sensor.__read_reg(0x00)
#print("gain_reg_val: " + str(gain_reg_val))
bitwise_gain_range = (gain_reg_val & 0b11110000) >> 4 #get the highest four bits which correspond to gain range. Depends on the bits set. Can be 0 > 4 for a total of 5 ranges.
#print("bitwise_gain_range: " + str(bin(bitwise_gain_range)))
gain_range = ((bitwise_gain_range & 0b1000) >> 3) + ((bitwise_gain_range & 0b0100) >> 2) + ((bitwise_gain_range & 0b0010) >> 1) + (bitwise_gain_range & 0b0001) #get an int for the number of bits set
#print("read_gain_range: " + str(gain_range))
gain_LSBs = gain_reg_val & 0b00001111 #The 4 lsbs represent the fine tuning gain control.
#print("gain_LSBs: " + str(gain_LSBs))
gain_curve_index = 16 * gain_range + gain_LSBs # this gives you an index from 0 > 79 which is the range of points you need to describe every possible gain setting along the new gain curve
#print("gain_curve_index: " + str(gain_curve_index))
gain = 10 ** (30 * gain_curve_index / 79 / 20) #10** = 10 ^, calculate the gain along the new exponential gain curve I defined earlier on
#print("gain: " + str(gain))
return gain
def set_gain(gain_db):
# gain_correlation_equation = 20*log(gain_db) = 30*(index)/79
gain_curve_index = (79 * 20 * math.log(gain_db, 10)) / 30 #return an index from the new exponential gain curve...
#... Can be 0 > 79 which is the # of points needed to describe every gain setting along the new curve
#print("gain_curve_index: " + str(gain_curve_index))
gain_range = int(gain_curve_index/16) #find a 0 > 4 value for the gain range. This range is defined by the 4 msbs. Thus we divide and round down by the LSB of the 4 MSBs (16)
#print("gain_range: " + str(gain_range))
gain_LSBs = int(gain_curve_index - 16 * gain_range) & 0b00001111 #Find how many LSBs above the gain range the index is. This is your fine tuning gain control
#print("gain_LSBs: " + str(bin(gain_LSBs)))
bitwise_gain_range = (0b1111 << gain_range) & 0b11110000 #make the gain range bitwise
#print("bitwise_gain_range: " + str(bin(bitwise_gain_range)))
gain_reg_val = bitwise_gain_range | gain_LSBs #OR
#print("gain to set: " + str(bin(gain_reg_val)))
sensor.__write_reg(0x00, gain_reg_val)
return gain_reg_val
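# Illustrative sketch (not part of the original script): the maths behind set_gain() and
# get_gain() above, worked through without touching the sensor. A linear gain factor
# (roughly 1x to 32x) maps to an index 0..79 on the exponential gain curve; the top 4
# register bits pick one of 5 coarse ranges and the low 4 bits fine-tune within that range.
def _gain_to_index(gain):
    return (79 * 20 * math.log(gain, 10)) / 30
def _index_to_gain(index):
    return 10 ** (30 * index / 79 / 20)
# For example, a requested gain of 8x lands at index ~47.6, and expanding that index gives
# ~8x back, so rounding to whole register steps is the only loss in the round trip.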
def set_custom_exposure(high_l_mean_thresh = 17, low_l_mean_thresh = 16):
try:
print("Starting Exposure Adjustment...")
b_gain = sensor.__read_reg(0x01)
r_gain = sensor.__read_reg(0x02)
g_gain = sensor.__read_reg(0x03)
r_gain = round(r_gain/4)
g_gain = round(g_gain/4)
b_gain = round(b_gain/4)
sensor.__write_reg(0x01, b_gain)
sensor.__write_reg(0x02, r_gain)
sensor.__write_reg(0x03, g_gain)
img = sensor.snapshot() # Take a picture and return the image.
img_stats = img.get_statistics()
l_mean = img_stats.l_mean()
count = 0
cur_gain = get_gain()
while(((l_mean > high_l_mean_thresh) | (l_mean < low_l_mean_thresh))) & (count < 256) & (cur_gain >= 0):
img = sensor.snapshot() # Take a picture and return the image.
img_stats = img.get_statistics()
l_mean = img_stats.l_mean()
if ((cur_gain < 1) | (cur_gain > 32)):
break
if l_mean > high_l_mean_thresh:
new_gain = cur_gain - .1
elif l_mean < low_l_mean_thresh:
new_gain = cur_gain + .1
else:
break #we're in the range now!
set_gain(new_gain)
cur_gain = new_gain
count += 1
if (count < 310) | (cur_gain == 0):
print("Exposure Adjustment Complete.")
return l_mean
else:
print("Exposure Adjustment Incomplete.")
return -1
except Exception as e:
print(e)
print("Error occured!")
return -2
if __name__ == "__main__":
########### SETUP STUFF
sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.skip_frames(time = 2000)
clock = time.clock()
i2c_obj = pyb.I2C(2, pyb.I2C.SLAVE, addr=0x12)
i2c_obj.deinit() # Fully reset I2C device...
i2c_obj = pyb.I2C(2, pyb.I2C.SLAVE, addr=0x12)
#get in focus balance. You have two seconds.
t_start = time.ticks()
t_elapsed = 0
while(t_elapsed < 1): #ignore bc 1 ms
img = sensor.snapshot()
t_elapsed = time.ticks() - t_start
sensor.set_auto_gain(False) # must be turned off for color tracking
sensor.set_auto_whitebal(False) # must be turned off for color tracking
sensor.set_auto_exposure(False)
sensor.set_contrast(+3)
print()
pre_adjust_r_gain = sensor.__read_reg(0x02)
pre_adjust_g_gain = sensor.__read_reg(0x03)
pre_adjust_b_gain = sensor.__read_reg(0x01)
pre_adjust_overall_gain = sensor.__read_reg(0x00)
pre_adjust_exposure = (sensor.__read_reg(0x08) << 8) + sensor.__read_reg(0x10)
print("R gain: " + str(pre_adjust_r_gain))
print("G gain: " + str(pre_adjust_g_gain))
print("B gain: " + str(pre_adjust_b_gain))
print("Overall gain: " + str(pre_adjust_overall_gain))
print("exposure: " + str(pre_adjust_exposure))
print('------------------------------------')
set_l_mean = set_custom_exposure() #default thresholds
print(set_l_mean)
post_adjust_r_gain = sensor.__read_reg(0x02)
post_adjust_g_gain = sensor.__read_reg(0x03)
post_adjust_b_gain = sensor.__read_reg(0x01)
post_adjust_overall_gain = sensor.__read_reg(0x00)
post_adjust_exposure = (sensor.__read_reg(0x08) << 8) + sensor.__read_reg(0x10)
print("R gain: " + str(post_adjust_r_gain))
print("G gain: " + str(post_adjust_g_gain))
print("B gain: " + str(post_adjust_b_gain))
print("Overall gain: " + str(post_adjust_overall_gain))
print("exposure: " + str(post_adjust_exposure))
print()
img = sensor.snapshot()
# should pull img_number from a text file and read the plant_id from a qr code or beaglebone
# default mode is pyb.usb_mode('VCP+MSC')
'''
pyb.usb_mode('VCP+HID')
utime.sleep_ms(1000)
last_photo_id_path = "last_photo_id.txt"
last_photo_id_fd = open(last_photo_id_path, "w+")
img_number_str = last_photo_id_fd.read()
print(img_number_str)
img_number_str = last_photo_id_fd.write("696969")
print("Written bytes: " + str(img_number_str))
img_number_str = last_photo_id_fd.read()
print(img_number_str)
last_photo_id_fd.close()
img_number = 1
plant_id = 1
img_id = str(img_number) + "_plant_" + str(plant_id)
raw_str = "raw_" + str(img_id)
raw_write = image.ImageWriter(raw_str)
raw_write.add_frame(img)
raw_write.close()
img.compress(quality = 100)
img.save("img_" + str(img_id))
raw_read = image.ImageReader(raw_str)
img = raw_read.next_frame(copy_to_fb = True, loop = False)
raw_read.close()
'''
'''
L = Lightness where 0 is black and 100 is white
A = -127 is green and 128 is red
B = -127 is blue and 128 is yellow.
'''
img_stats = img.get_statistics()
########### FIND BAD BLOBS
unhealthy_full_l_mean = 0
unhealthy_full_a_mean = 0
unhealthy_full_b_mean = 0
unhealthy_centroid_l_mean = 0
unhealthy_centroid_a_mean = 0
unhealthy_centroid_b_mean = 0
unhealthy_blob_l_mean = 0
unhealthy_blob_a_mean = 0
unhealthy_blob_b_mean = 0
healthy_full_l_mean = 0
healthy_full_a_mean = 0
healthy_full_b_mean = 0
healthy_centroid_l_mean = 0
healthy_centroid_a_mean = 0
healthy_centroid_b_mean = 0
healthy_blob_l_mean = 0
healthy_blob_a_mean = 0
healthy_blob_b_mean = 0
blob_index = -1
stage_one_bad_thresholds = [(20, 100, -10, 127, 3, 128)]
for blob_index, stage_one_bad_blob in enumerate(img.find_blobs(stage_one_bad_thresholds, pixels_threshold=100, area_threshold=100, merge = False, margin = 15)):
rect_stats = img.get_statistics(roi = stage_one_bad_blob.rect())
print("stage_one_bad_blob: " + str(stage_one_bad_blob))
print("density: " + str(stage_one_bad_blob.density()))
print("full: " + str(rect_stats))
unhealthy_full_l_mean += rect_stats[0]
unhealthy_full_a_mean += rect_stats[8]
unhealthy_full_b_mean += rect_stats[16]
side_l = stage_one_bad_blob.density() * min(stage_one_bad_blob[2], stage_one_bad_blob[3])
partial_hist = img.get_histogram(roi = (stage_one_bad_blob.cx() - round(side_l/2), stage_one_bad_blob.cy() - round(side_l/2), round(side_l), round(side_l)))
partial_stats = partial_hist.get_statistics()
print("partial: "+ str(partial_stats))
unhealthy_centroid_l_mean += partial_stats[0]
unhealthy_centroid_a_mean += partial_stats[8]
unhealthy_centroid_b_mean += partial_stats[16]
blob_stats = img.get_statistics(roi = stage_one_bad_blob.rect(), thresholds = stage_one_bad_thresholds)
print("blob: "+ str(blob_stats))
print("\n")
unhealthy_blob_l_mean += blob_stats[0]
unhealthy_blob_a_mean += blob_stats[8]
unhealthy_blob_b_mean += blob_stats[16]
img.draw_rectangle(stage_one_bad_blob.rect(), color = (255, 255, 255)) #purple
#img.draw_rectangle((stage_one_bad_blob.cx() - round(side_l/2), stage_one_bad_blob.cy() - round(side_l/2), round(side_l), round(side_l)), color = (255, 85, 0))
if blob_index != -1:
unhealthy_full_l_mean = unhealthy_full_l_mean/(blob_index + 1)
unhealthy_full_a_mean = unhealthy_full_a_mean/(blob_index + 1)
unhealthy_full_b_mean = unhealthy_full_b_mean/(blob_index + 1)
unhealthy_centroid_l_mean = unhealthy_centroid_l_mean/(blob_index + 1)
unhealthy_centroid_a_mean = unhealthy_centroid_a_mean/(blob_index + 1)
unhealthy_centroid_b_mean = unhealthy_centroid_b_mean/(blob_index + 1)
unhealthy_blob_l_mean = unhealthy_blob_l_mean/(blob_index + 1)
unhealthy_blob_a_mean = unhealthy_blob_a_mean/(blob_index + 1)
unhealthy_blob_b_mean = unhealthy_blob_b_mean/(blob_index + 1)
print("------------------------------------------------------------------------")
########### FIND GOOD BLOBS
#stage_one_good_thresholds = [(img_stats.l_mean() - 1, 100, -127, img_stats.a_mean() - 4, img_stats.b_mean() - 8, 60)]
stage_one_good_thresholds = [(25, 100, -127, -3, -15, 3)]
for blob_index, stage_one_good_blob in enumerate(img.find_blobs(stage_one_good_thresholds, pixels_threshold=100, area_threshold=100, merge = False, margin = 15)):
rect_stats = img.get_statistics(roi = stage_one_good_blob.rect())
print("stage_one_good_blob: " + str(stage_one_good_blob))
print("density: " + str(stage_one_good_blob.density()))
print("full: "+ str(rect_stats))
healthy_full_l_mean += rect_stats[0]
healthy_full_a_mean += rect_stats[8]
healthy_full_b_mean += rect_stats[16]
side_l = stage_one_good_blob.density() * min(stage_one_good_blob[2], stage_one_good_blob[3])
partial_hist = img.get_histogram(roi = (stage_one_good_blob.cx() - round(side_l/2), stage_one_good_blob.cy() - round(side_l/2), round(side_l), round(side_l)))
partial_stats = partial_hist.get_statistics()
print("partial: "+ str(partial_stats))
healthy_centroid_l_mean += partial_stats[0]
healthy_centroid_a_mean += partial_stats[8]
healthy_centroid_b_mean += partial_stats[16]
blob_stats = img.get_statistics(roi = stage_one_good_blob.rect(), thresholds = stage_one_good_thresholds)
print("blob: "+ str(blob_stats))
print("\n")
healthy_blob_l_mean += blob_stats[0]
healthy_blob_a_mean += blob_stats[8]
healthy_blob_b_mean += blob_stats[16]
img.draw_rectangle(stage_one_good_blob.rect(), color = (0, 0, 0)) #black
#img.draw_rectangle((stage_one_good_blob.cx() - round(side_l/2), stage_one_good_blob.cy() - round(side_l/2), round(side_l), round(side_l)), color = (255, 85, 0))
########## COLOR IT ALL IN
for x in range(stage_one_good_blob[2]):
for y in range(stage_one_good_blob[3]):
pix_location = (stage_one_good_blob[0] + x, stage_one_good_blob[1] + y)
pix_vals = img.get_pixel(pix_location[0], pix_location[1])
lab_pix_vals = image.rgb_to_lab(pix_vals)
if ((lab_pix_vals[1] < (blob_stats.a_mean() + 2 * blob_stats.a_stdev())) & (lab_pix_vals[0] >= (blob_stats.l_mean() - .1 * blob_stats.l_stdev()))): #& (abs(lab_pix_vals[2] - lab_pix_vals[1]) > 10) & (lab_pix_vals[0] > (blob_stats.l_mean() - 10)):
pass
else:
pass
#img.set_pixel(pix_location[0], pix_location[1], (255, 0, 0))
if blob_index != -1:
healthy_full_l_mean = healthy_full_l_mean/(blob_index + 1)
healthy_full_a_mean = healthy_full_a_mean/(blob_index + 1)
healthy_full_b_mean = healthy_full_b_mean/(blob_index + 1)
healthy_centroid_l_mean = healthy_centroid_l_mean/(blob_index + 1)
healthy_centroid_a_mean = healthy_centroid_a_mean/(blob_index + 1)
healthy_centroid_b_mean = healthy_centroid_b_mean/(blob_index + 1)
healthy_blob_l_mean = healthy_blob_l_mean/(blob_index + 1)
healthy_blob_a_mean = healthy_blob_a_mean/(blob_index + 1)
healthy_blob_b_mean = healthy_blob_b_mean/(blob_index + 1)
print(img.compress_for_ide(quality = 100))
print("~~~~~~~~~~~~~~~ RESULTS: ~~~~~~~~~~~~~~~~")
print("good thresholds: " + str(stage_one_good_thresholds))
print("bad thresholds: " + str(stage_one_bad_thresholds))
print("unhealthy full l mean: " + str(unhealthy_full_l_mean))
print("unhealthy full a mean: " + str(unhealthy_full_a_mean))
print("unhealthy full b mean: " + str(unhealthy_full_b_mean))
#print("unhealthy centroid l mean: " + str(unhealthy_centroid_l_mean))
#print("unhealthy centroid a mean: " + str(unhealthy_centroid_a_mean))
#print("unhealthy centroid b mean: " + str(unhealthy_centroid_b_mean))
print("unhealthy blob l mean: " + str(unhealthy_blob_l_mean))
print("unhealthy blob a mean: " + str(unhealthy_blob_a_mean))
print("unhealthy blob b mean: " + str(unhealthy_blob_b_mean))
print("healthy full l mean: " + str(healthy_full_l_mean))
print("healthy full a mean: " + str(healthy_full_a_mean))
print("healthy full b mean: " + str(healthy_full_b_mean))
#print("healthy centroid l mean: " + str(healthy_centroid_l_mean))
#print("healthy centroid a mean: " + str(healthy_centroid_a_mean))
#print("healthy centroid b mean: " + str(healthy_centroid_b_mean))
print("healthy blob l mean: " + str(healthy_blob_l_mean))
print("healthy blob a mean: " + str(healthy_blob_a_mean))
print("healthy blob b mean: " + str(healthy_blob_b_mean))
| 43.636888
| 262
| 0.66616
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4,828
| 0.318848
|
dd83de2fe7b5a8905e65a790566a924385cc7b19
| 297
|
py
|
Python
|
dados/outside/6-JEL_code.py
|
correia-marcos/Innovation-on-brazilian-economic-research
|
98bc677618ec2dff062db42cda7279a5e7065a32
|
[
"MIT"
] | null | null | null |
dados/outside/6-JEL_code.py
|
correia-marcos/Innovation-on-brazilian-economic-research
|
98bc677618ec2dff062db42cda7279a5e7065a32
|
[
"MIT"
] | null | null | null |
dados/outside/6-JEL_code.py
|
correia-marcos/Innovation-on-brazilian-economic-research
|
98bc677618ec2dff062db42cda7279a5e7065a32
|
[
"MIT"
] | null | null | null |
"""
This script was made to analyse the relation between JEL codes and areas in ANPEC.
The idea is that, by checking the JEL code of each paper, it can be visualized
whether some papers were published in an area (of the ANPEC meeting) not expected
from their JEL code.
"""
import os
import pandas as pd
| 22.846154
| 76
| 0.767677
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 256
| 0.861953
|
dd88982df37b33dce441276837b7773dc3af6b26
| 1,311
|
py
|
Python
|
tests/garage/tf/spaces/test_dict_space.py
|
shadiakiki1986/garage
|
095bb5d25b32df1d44b47e99a78a9b01796941d9
|
[
"MIT"
] | 3
|
2019-08-11T22:26:55.000Z
|
2020-11-28T10:23:50.000Z
|
tests/garage/tf/spaces/test_dict_space.py
|
shadiakiki1986/garage
|
095bb5d25b32df1d44b47e99a78a9b01796941d9
|
[
"MIT"
] | null | null | null |
tests/garage/tf/spaces/test_dict_space.py
|
shadiakiki1986/garage
|
095bb5d25b32df1d44b47e99a78a9b01796941d9
|
[
"MIT"
] | 2
|
2019-08-11T22:30:14.000Z
|
2021-03-25T02:57:50.000Z
|
"""This script tests garage.tf.spaces.dict functionality."""
import unittest
from garage.misc import ext
from garage.tf.envs import TfEnv
from tests.fixtures.envs.dummy import DummyDictEnv
class TestDictSpace(unittest.TestCase):
def test_dict_space(self):
ext.set_seed(0)
# A dummy dict env
dummy_env = DummyDictEnv()
dummy_act = dummy_env.action_space
dummy_act_sample = dummy_act.sample()
# A dummy dict env wrapped by garage.tf
tf_env = TfEnv(dummy_env)
tf_act = tf_env.action_space
tf_obs = tf_env.observation_space
# flat_dim
assert tf_act.flat_dim == tf_act.flatten(dummy_act_sample).shape[-1]
# flat_dim_with_keys
assert tf_obs.flat_dim == tf_obs.flat_dim_with_keys(
iter(["achieved_goal", "desired_goal", "observation"]))
# un/flatten
assert tf_act.unflatten(
tf_act.flatten(dummy_act_sample)) == dummy_act_sample
# un/flatten_n
samples = [dummy_act.sample() for _ in range(10)]
assert tf_act.unflatten_n(tf_act.flatten_n(samples)) == samples
# un/flatten_with_keys
assert tf_act.unflatten_with_keys(
tf_act.flatten_with_keys(dummy_act_sample, iter(["action"])),
iter(["action"]))
| 31.214286
| 76
| 0.661327
| 1,118
| 0.852784
| 0
| 0
| 0
| 0
| 0
| 0
| 253
| 0.192982
|
dd8913997853973a6abd55f95d60d2c6a230000b
| 3,429
|
py
|
Python
|
utils/compare_MRAE.py
|
Liuhongzhi2018/SSRGAN
|
b5be922db1600aabb6a06ee52fb1c83ee738d794
|
[
"Apache-2.0"
] | 1
|
2022-01-21T09:01:48.000Z
|
2022-01-21T09:01:48.000Z
|
utils/compare_MRAE.py
|
Liuhongzhi2018/SSRGAN
|
b5be922db1600aabb6a06ee52fb1c83ee738d794
|
[
"Apache-2.0"
] | 1
|
2021-08-18T11:33:43.000Z
|
2021-08-18T11:33:43.000Z
|
utils/compare_MRAE.py
|
Liuhongzhi2018/SSRGAN
|
b5be922db1600aabb6a06ee52fb1c83ee738d794
|
[
"Apache-2.0"
] | null | null | null |
import argparse
import os
import cv2
import numpy as np
import hdf5storage as hdf5
from scipy.io import loadmat
from matplotlib import pyplot as plt
from SpectralUtils import savePNG, projectToRGB
from EvalMetrics import computeMRAE
BIT_8 = 256
# read path
def get_files(path):
# read a folder, return the complete path
ret = []
for root, dirs, files in os.walk(path):
for filespath in files:
if filespath[-4:] == '.mat':
ret.append(os.path.join(root, filespath))
return ret
def get_jpgs(path):
# read a folder, return the image name
ret = []
for root, dirs, files in os.walk(path):
for filespath in files:
if filespath[-4:] == '.mat':
ret.append(filespath)
return ret
def check_path(path):
if not os.path.exists(path):
os.makedirs(path)
def demo_track1(filePath, filtersPath):
#filePath = "F:\\NTIRE 2020\\spectral reconstruction\\code1\\en4_track1\\ARAD_HS_0451.mat"
#filtersPath = "./resources/cie_1964_w_gain.npz"
# Load HS image and filters
cube = hdf5.loadmat(filePath)['cube']
#cube = loadmat(filePath)['cube']
filters = np.load(filtersPath)['filters']
# Project image to RGB
rgbIm = np.true_divide(projectToRGB(cube, filters), BIT_8)
# Save image file
path = 'temp_clean.png'
savePNG(rgbIm, path)
# Display RGB image
img = cv2.imread(path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
plt.imshow(img)
plt.title('Example "Clean" Output Image')
plt.show()
def single_img_mrae(generated_mat_path, groundtruth_mat_path):
#generated_mat_path = "F:\\NTIRE 2020\\spectral reconstruction\\code1\\en4_track1\\ARAD_HS_0451.mat"
#groundtruth_mat_path = "F:\\NTIRE 2020\\spectral reconstruction\\NTIRE2020_Validation_Spectral\\ARAD_HS_0451.mat"
generated_mat = hdf5.loadmat(generated_mat_path)['cube'] # shape: (482, 512, 31)
groundtruth_mat = hdf5.loadmat(groundtruth_mat_path)['cube'] # shape: (482, 512, 31)
mrae = computeMRAE(generated_mat, groundtruth_mat)
print(mrae)
return mrae
def folder_img_mrae(generated_folder_path, groundtruth_folder_path):
#generated_folder_path = "F:\\NTIRE 2020\\spectral reconstruction\\code1\\en4_track1"
#groundtruth_folder_path = "F:\\NTIRE 2020\\spectral reconstruction\\NTIRE2020_Validation_Spectral"
matlist = get_jpgs(generated_folder_path)
avg_mrae = 0
for i, matname in enumerate(matlist):
generated_mat_path = os.path.join(generated_folder_path, matname)
groundtruth_mat_path = os.path.join(groundtruth_folder_path, matname)
generated_mat = hdf5.loadmat(generated_mat_path)['cube'] # shape: (482, 512, 31)
groundtruth_mat = hdf5.loadmat(groundtruth_mat_path)['cube'] # shape: (482, 512, 31)
mrae = computeMRAE(generated_mat, groundtruth_mat)
avg_mrae = avg_mrae + mrae
print('The %d-th mat\'s mrae:' % (i + 1), mrae)
avg_mrae = avg_mrae / len(matlist)
print('The average mrae is:', avg_mrae)
return avg_mrae
generated_folder_path = "F:\\NTIRE 2020\\spectral reconstruction\\ensemble\\ensemble\\track1"
generated_folder_path = "F:\\NTIRE 2020\\spectral reconstruction\\ensemble\\ensemble\\track2"
groundtruth_folder_path = "F:\\NTIRE 2020\\spectral reconstruction\\NTIRE2020_Validation_Spectral"
avg_mrae = folder_img_mrae(generated_folder_path, groundtruth_folder_path)
| 36.870968
| 118
| 0.706328
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,189
| 0.346748
|
dd895eff6bdbc6e4f11421a7c77e8c3865e7d03d
| 2,435
|
py
|
Python
|
board/send_message.py
|
ben741863140/cfsystem
|
227e269f16533719251962f4d8caee8b51091d2f
|
[
"Apache-2.0"
] | 4
|
2018-02-22T01:59:07.000Z
|
2020-07-09T06:28:46.000Z
|
board/send_message.py
|
ben741863140/cfsystem
|
227e269f16533719251962f4d8caee8b51091d2f
|
[
"Apache-2.0"
] | null | null | null |
board/send_message.py
|
ben741863140/cfsystem
|
227e269f16533719251962f4d8caee8b51091d2f
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding:utf-8 -*-
import gzip
import re
import http.cookiejar
import urllib.request
import urllib.parse
# from logreg.sender import use_sender, sender
def send_message(handle, content, captcha):
def ungzip(data):
return gzip.decompress(data)
def get_csrf(data):
cer = re.compile('data-csrf=\'(.*?)\'> </span>', re.S)
return cer.findall(data)[0]
def getOpener(head):
        # deal with cookie
cj = http.cookiejar.CookieJar()
pro = urllib.request.HTTPCookieProcessor(cj)
opener = urllib.request.build_opener(pro)
header = []
for key, value in head.items():
elem = (key, value)
header.append(elem)
opener.addheaders = header
return opener
header = {
'Connection': 'Keep-Alive',
'Accept': 'text/html, application/xhtml+xml, */*',
'Accept-Language': 'en-US,en;q=0.8,zh-Hans-CN;q=0.5,zh-Hans;q=0.3',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; rv:11.0) like Gecko',
'Accept-Encoding': 'gzip, deflate',
'Host': 'www.codeforces.com',
'DNT': '1'
}
url = 'http://codeforces.com/enter'
opener = getOpener(header)
data = opener.open(url).read()
data = ungzip(data)
csrf_token = get_csrf(data.decode())
# print(data)
# use = str(sender(use_sender()))
post_dict = {
'csrf_token': csrf_token,
'action': 'enter',
'ftaa': 'facg0yyl14awvys2jp',
'bfaa': 'd3165a769f306b8a47053d749e2d920a',
'handleOrEmail': 'scau_support',
'password': 'Aa123456',
'_tta': '435'
}
# print(use)
# print(handle)
# print(data)
# if 'scau_support' not in str(data):
# return -1
post_data = urllib.parse.urlencode(post_dict).encode()
opener.open(url, post_data)
url = 'http://codeforces.com/usertalk?other=' + str(handle)
data = opener.open(url).read()
data = ungzip(data)
if 'scau_support' not in str(data):
return -1
csrf_token = get_csrf(data.decode())
post_dict = {
'csrf_token': csrf_token,
'action': 'sendMessage',
'content': content,
'_tta': '435'
}
post_data = urllib.parse.urlencode(post_dict).encode()
data = opener.open(url, post_data).read()
data = ungzip(data)
# print(data)
if captcha not in str(data):
return 1
return 0
| 29.695122
| 93
| 0.588501
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 844
| 0.346612
|
dd8f4efcbe0f27065a9ebbb9e0dbf84e6d87fb06
| 22,725
|
py
|
Python
|
preprocess.py
|
cheery/pytci
|
57b526b621da5e6783d15d73bac4ca57b281be70
|
[
"MIT"
] | 148
|
2015-10-18T04:47:27.000Z
|
2021-11-09T09:36:43.000Z
|
preprocess.py
|
cheery/pytci
|
57b526b621da5e6783d15d73bac4ca57b281be70
|
[
"MIT"
] | 3
|
2015-10-21T21:21:38.000Z
|
2017-04-10T00:16:04.000Z
|
preprocess.py
|
cheery/pytci
|
57b526b621da5e6783d15d73bac4ca57b281be70
|
[
"MIT"
] | 10
|
2015-10-18T03:29:39.000Z
|
2018-12-18T12:14:01.000Z
|
import tokenize, operator
macro_list = {}
def chomp(state, with_defined=False):
token = state.next_expanded_token()
while token is not None:
if name_of(token) == "macro":
for token in run_macro(state):
yield token
elif state.processing:
yield token
token = state.next_expanded_token()
def run_macro(state):
identifier = state.expect('identifier')
macro_name = value_of(identifier)
if macro_name in macro_list:
for token in macro_list[macro_name](state, position_of(identifier)):
yield token
assert state.macro_end(), "{1}: {0}: macro is expected to end".format(*position_of(identifier))
else:
assert False, "Macro {!r} not implemented".format(macro_name)
def pump_token(macro_stack, macro_queue, current, passthrough):
if isinstance(current, list): # an expansion.
for current in current:
pump_token(macro_stack, macro_queue, current, passthrough)
elif len(macro_stack) > 0:
if macro_stack[-1].pump(current):
macro_queue.extend(macro_stack.pop(-1).invoke())
else:
passthrough.append(current)
def pull_identifier(state):
token = state.next_token()
if token and value_of(token) == '(':
token = state.next_token()
stop = state.next_token()
assert stop and value_of(stop) == ')', "bonked 'defined'"
assert token and name_of(token) == 'identifier', "bonked 'defined'"
return token
class CallBuilder(object):
def __init__(self, position, expansion): # at this point '(' has been processed.
self.position = position
self.expansion = expansion
self.bumps = 0
self.bags = []
self.bag = []
self.toco = 0
def pump(self, token):
value = value_of(token)
if value == ',' and self.bumps == 0:
self.bags.append(self.bag)
self.bag = []
if value == '(':
self.bumps += 1
if value == ')':
if self.bumps == 0:
return True
self.bumps -= 1
self.bag.append(token)
return False
def invoke(self):
if self.toco > 1: # only if there were nonzero arguments.
self.bags.append(self.bag)
return self.expansion(self.position, self.bags)
def process_define(state, position):
macro_name = value_of(state.expect('identifier'))
macro_func = (state.stream.character == '(')
macro_stream = state.macro_stream()
if state.processing:
if macro_func: # TODO: warn if macro is redefined
state.env[macro_name] = parse_macro_function(macro_stream)
else:
state.env[macro_name] = list(macro_stream)
return ()
def process_undef(state, position):
macro_name = value_of(state.expect('identifier'))
if state.processing and macro_name in state.env:
state.env.pop(macro_name)
return ()
def process_if(state, position):
state.stack.append((state.processing, state.processing_inside))
state.processing_inside = 'cond-done'
macro_stream = state.hacked_macro_expansion()
if state.processing:
state.processing = bool(state.macroeval(state, macro_stream))
state.processing_inside = ('cond', 'cond-done')[state.processing]
return ()
def process_elif(state, position):
macro_stream = state.hacked_macro_expansion()
if state.processing_inside == 'cond':
state.processing = state.macroeval(state, macro_stream)
state.processing_inside = ('cond', 'cond-done')[state.processing]
elif state.processing_inside == 'cond-done':
state.processing = False
else:
assert False, "{1}: {0}: #elif at toplevel".format(*position)
return ()
def process_else(state, position):
if state.processing_inside == 'cond':
state.processing = True
elif state.processing_inside == 'cond-done':
state.processing = False
else:
assert False, "{2}: {1}: #else at {0}".format(state.processing_inside, *position)
state.processing_inside = 'else-block'
return ()
def process_ifdef(state, position):
state.stack.append((state.processing, state.processing_inside))
state.processing_inside = 'cond-done'
macro_name = value_of(state.expect('identifier'))
if state.processing:
state.processing = macro_name in state.env
state.processing_inside = ('cond', 'cond-done')[state.processing]
return ()
def process_ifndef(state, position):
state.stack.append((state.processing, state.processing_inside))
state.processing_inside = 'cond-done'
macro_name = value_of(state.expect('identifier'))
if state.processing:
state.processing = macro_name not in state.env
state.processing_inside = ('cond', 'cond-done')[state.processing]
return ()
def process_endif(state, position):
assert len(state.stack) > 0, "{1}: {0}: excess endif".format(*position)
state.processing, state.processing_inside = state.stack.pop(-1)
return ()
# This error handling is a bit weird. The idea is
# that you could generate stubs instead of halting the compiler.
def process_error(state, position):
macro_stream = state.macro_stream()
if state.processing:
message = ' '.join(map(value_of, macro_stream))
return [tokenize.token(position, 'error', message)]
return ()
def process_line(state, position):
line = int(value_of(state.expect('number')))
filename = value_of(state.expect('string'))
state.stream.skip_spaces()
state.stream.line = line
state.stream.filename = filename
return ()
def process_include(state, position):
if state.macro_end():
assert len(state.stack) > 0, "{1}: {0}: malformed include".format(*position)
token = state.next_token()
if name_of(token) == 'string':
if state.processing:
return state.include(state, position, value_of(token), True)
else:
assert value_of(token) == '<', "{1}: {0}: malformed include".format(*position)
string = ""
while state.stream.character not in ('>', '', '\n'):
string += state.stream.get_next()
assert state.stream.get_next() == '>', "{1}: {0}: malformed include".format(*position)
if state.processing:
return state.include(state, position, string, False)
return ()
def _init_itself():
for name, value in globals().iteritems():
if name.startswith('process_'):
macro_name = name.split('_', 1)[1]
macro_list[macro_name] = value
_init_itself()
# ? - badly implemented, ! - no implementation
#? Integer constants.
#? Character constants, which are interpreted as they would be in normal code.
#? Arithmetic operators for addition, subtraction, multiplication, division,
#?? bitwise operations, shifts, comparisons, and logical operations (&& and ||).
#! The latter two obey the usual short-circuiting rules of standard C.
# Macros. All macros in the expression are expanded before
# actual computation of the expression's value begins.
# Uses of the defined operator, which lets you check whether macros are defined in the middle of an #if.
# Identifiers that are not macros, which are all considered to be the number zero.
# This allows you to write #if MACRO instead of #ifdef MACRO, if you know that MACRO,
# when defined, will always have a nonzero value. Function-like macros used without
# their function call parentheses are also treated as zero.
def default_macroeval(state, sequence):
context = []
value_stack = []
operator_stack = []
def flip(precedence):
while len(operator_stack) > 0 and quick_precedence_table[operator_stack[-1]] >= precedence:
op = operator_stack.pop(-1)
argc, fn = quick_operator_table[op]
value_stack[-argc:] = [fn(*value_stack[-argc:])]
for token in sequence:
if value_of(token) in quick_precedence_table:
flip(quick_precedence_table[value_of(token)])
operator_stack.append(value_of(token))
elif value_of(token) == '(':
context.append((value_stack, operator_stack))
value_stack = []
operator_stack = []
elif value_of(token) == ')':
flip(0)
assert len(value_stack) == 1, "lol?"
vs, operator_stack = context.pop(-1)
value_stack = vs + value_stack
elif name_of(token) == 'number':
if '.' in value_of(token):
value_stack.append(float(value_of(token)))
elif value_of(token).startswith('0x') or value_of(token).startswith('0X'):
value_stack.append(long(value_of(token).rstrip('L'), 16))
else:
value_stack.append(long(value_of(token).rstrip('L')))
flip(150)
elif name_of(token) in ('char', 'string'):
value_stack.append(value_of(token))
flip(150)
elif name_of(token) == 'identifier':
value_stack.append(0)
else:
assert False, "Not sure how to macro-evaluate: {}".format(token)
flip(0)
if len(value_stack) == 1 and len(context) == 0:
return value_stack[0]
assert False, (value_stack + list(reversed(operator_stack)))
quick_operator_table = {
'!':(1, operator.not_),
'*':(2, operator.mul), '/':(2, operator.div), '%':(2, operator.mod),
'+':(2, operator.add), '-':(2, operator.sub),
'<<':(2, operator.lshift), '>>':(2, operator.rshift),
'<':(2, operator.lt), '<=':(2, operator.le), '>':(2, operator.gt), '>=':(2, operator.ge),
'==':(2, operator.eq), '!=':(2, operator.ne),
'&':(2, operator.and_), '^':(2, operator.xor), '|':(2, operator.or_),
'&&':(2, (lambda x, y: x and y)), '||':(2, (lambda x, y: x or y)),
}
quick_precedence_table = {
'!':200,
'*':100, '/':100, '%':100,
'+':80, '-':80,
'<<':70, '>>':70,
'<':60, '<=':60, '>':60, '>=':60,
'==':50, '!=':50,
'&':40, '^':35, '|':30, '&&':25, '||':20,
}
class BaseContext(object):
def __init__(self, parent, stream, shadow):
self.parent = parent
self.stream = stream
self.shadow = shadow
self.variables = ()
self.exhausted = False
def next_token(self):
token = tokenize.chop(self.stream)
if token is None:
self.exhausted = True
return token
def macro_concat(self):
return False
def macro_end(self):
self.stream.skip_spaces()
return self.stream.character in ('', '\n')
def macro_func(self):
self.stream.skip_spaces_and_newlines()
return self.stream.character == '('
class ExpandContext(object):
def __init__(self, parent, stream, shadow, variables=()):
self.parent = parent
self.stream = stream
self.shadow = shadow
self.variables = variables
try:
self.exhausted = False
self.lookahead = stream.next()
except StopIteration as stop:
self.exhausted = True
self.lookahead = None
def next_token(self):
token = self.lookahead
try:
self.exhausted = False
self.lookahead = self.stream.next()
except StopIteration as stop:
self.exhausted = True
self.lookahead = None
assert token is not None, "over fetch"
return token
def macro_concat(self):
return self.lookahead and value_of(self.lookahead) == '##'
def macro_end(self):
return False
def macro_func(self):
assert not self.exhausted
return value_of(self.lookahead) == '('
class ExpandedContext(ExpandContext):
pass
class PreprocessorState(object):
def __init__(self, stream, env, include_chain=(), include=(), macroeval=default_macroeval, processing=True, processing_inside='toplevel'):
self.stream = stream
self.env = env
self.include_chain = include_chain
self.include = include
self.macroeval = macroeval
self.processing = processing
self.processing_inside = processing_inside
self.stack = []
self.context = BaseContext(None, stream, ())
def pump_context(self):
while self.context and self.context.exhausted:
self.context = self.context.parent
def next_token(self):
token = self.context.next_token()
self.pump_context()
return token
def next_expanded_token(self, with_defined=False):
context = self.context
token = self.next_token()
if self.context and self.context.macro_concat():
self.next_token()
assert self.context == context, "Catenate should stick within context"
other = self.expect('identifier')
return catenate_tokens(token, other, context.variables)
if isinstance(context, ExpandedContext):
return token
if with_defined and token and value_of(token) == 'defined':
token = self.next_token()
if token and value_of(token) == '(':
token = self.expect('identifier')
rp = self.next_token()
assert rp and value_of(rp) == ')', "expected right parenthesis in 'defined'"
return tokenize.token(position_of(token), "number", ["0", "1"][value_of(token) in self.env])
if token and value_of(token) == '#' and isinstance(context, ExpandContext) and context == self.context:
ntoken = self.next_token()
if value_of(ntoken) in context.variables:
return tokenize.token(position_of(ntoken), "string", stringify(context.variables[value_of(ntoken)]))
else:
assert False, "'#' outside proper context"
if token and name_of(token) == 'identifier':
value = value_of(token)
if value in context.shadow:
return token
if value in context.variables:
self.context = ExpandedContext(self.context, iter(context.variables[value]), ())
self.pump_context()
return self.next_expanded_token()
expansion = self.env.get(value)
if expansion is None:
return token
elif callable(expansion) and self.context and self.context.macro_func():
args = self.next_macro_call()
self.context = expansion(self.context, context.shadow + (value,), position_of(token), args)
self.pump_context()
return self.next_expanded_token()
else:
self.context = ExpandContext(self.context, iter(expansion), context.shadow + (value,))
self.pump_context()
return self.next_expanded_token()
return token
def next_macro_call(self):
begin = self.next_token()
assert value_of(begin) == '(', "broken preprocessor"
token = self.next_expanded_token()
if token and value_of(token) == ')':
return []
bag = []
args = [bag]
while token and value_of(token) != ')':
if value_of(token) == '(':
bag.extend(self.grouped_tokens(token))
elif value_of(token) == ',':
bag = []
args.append(bag)
else:
bag.append(token)
token = self.next_expanded_token()
if token and value_of(token) == ')':
return args
assert False, "{1}: {0}: nonterminated macro call".format(position_of(begin))
def grouped_tokens(self, token):
        begin = token  # remember the opening '(' so error reporting can point at it
        yield token
token = self.next_expanded_token()
while token and value_of(token) != ')':
if value_of(token) == '(':
for subtoken in self.grouped_tokens(token):
yield subtoken
else:
yield token
token = self.next_expanded_token()
if token and value_of(token) == ')':
yield token
else:
assert False, "{1}: {0}: nonterminated preprocessed group".format(position_of(begin))
def expect(self, name):
token = self.next_token()
assert name_of(token) == name, "'expected' error message not implemented: {} got {}".format(name, token)
return token
def macro_end(self):
self.pump_context()
if self.context is None:
return True
return self.context.macro_end()
def macro_stream(self):
sequence = []
while not self.macro_end():
sequence.append(self.next_token())
return MacroStream(iter(sequence))
def hacked_macro_expansion(self):
sequence = []
while not self.macro_end():
sequence.append(self.next_token())
original_context = self.context
self.context = ExpandContext(None, iter(sequence), original_context.shadow)
sequence = []
while self.context is not None:
sequence.append(self.next_expanded_token(with_defined=True))
self.context = original_context
return MacroStream(iter(sequence))
def fork(self, stream, filename):
include_chain = self.include_chain + (filename,)
return self.__class__(stream, self.env, include_chain,
include=self.include, macroeval=self.macroeval)
class MacroStream(object):
def __init__(self, generator):
self.generator = generator
def next_token(self):
try:
return self.generator.next()
except StopIteration as stop:
return None
def __iter__(self):
return self.generator
def parse_macro_function(stream):
lp = stream.next_token()
assert value_of(lp) == '(', "broken preprocessor"
bind = []
is_variadic = False
current = stream.next_token()
while current and value_of(current) != ')':
if value_of(current) == '...':
is_variadic = True
break
bind.append(value_of(current))
current = stream.next_token()
if not current or value_of(current) != ",":
break
current = stream.next_token()
    assert current and value_of(current) == ')', "{1}: {0}: unterminated argument list error".format(*position_of(lp))
return MacroFunction(bind, is_variadic, list(stream))
class MacroFunction(object):
def __init__(self, bind, is_variadic, body):
self.bind = bind
self.is_variadic = is_variadic
self.body = body
def __call__(self, context, shadow, position, args):
if self.is_variadic:
assert len(args) >= len(self.bind), "argument error, error not implemented. {} -> {} ({})".format(position, self.bind, args)
else:
assert len(args) == len(self.bind), "argument error, error not implemented. {} -> {} ({})".format(position, self.bind, args)
variables = dict(zip(self.bind, args))
if self.is_variadic:
variables['...'] = args[len(self.bind):]
return ExpandContext(context, iter(self.body), shadow, variables)
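# Illustration (editorial sketch, not from the original source): a definition
# like `#define ADD(a, b) ((a) + (b))` is parsed by parse_macro_function into
# MacroFunction(['a', 'b'], False, body). A later use ADD(1, 2) reaches
# __call__ with args = [[<token 1>], [<token 2>]], so the body is replayed in
# an ExpandContext whose variables map 'a' and 'b' to those argument token
# lists; with a trailing '...', surplus arguments are collected under '...'.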
def position_of(token):
return token[0]
def name_of(token):
return token[1]
def value_of(token):
return token[2]
def stringify(tokens):
return ' '.join(map(value_of, tokens))
def catenate_tokens(lhs, rhs, variables):
position = position_of(lhs)
if value_of(lhs) in variables:
lhs = variables[value_of(lhs)]
assert len(lhs) <= 1, "rare case for catenation"
else:
lhs = [lhs]
if value_of(rhs) in variables:
rhs = variables[value_of(rhs)]
assert len(rhs) <= 1, "rare case for catenation"
else:
rhs = [rhs]
return tokenize.token(position, 'identifier', ''.join(map(value_of, lhs + rhs)))
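# Example (editorial sketch): expanding `#define GLUE(x, y) x ## y` on
# GLUE(foo, bar) routes through catenate_tokens with the argument map as
# `variables`, so lhs and rhs resolve to the foo and bar tokens and the result
# is one new 'identifier' token with value 'foobar' at the left token's position.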
if False:
import character_stream
pull = character_stream.pull
def new_pull(x):
ch = pull(x)
print "read character:", repr(ch)
return ch
character_stream.pull = new_pull
if __name__=='__main__':
import trigraphs, traceback, os, sys
from character_stream import CharacterStream
local_includes = []
global_includes = ['/usr/lib/gcc/x86_64-linux-gnu/4.8/include', '/usr/local/include', '/usr/lib/gcc/x86_64-linux-gnu/4.8/include-fixed', '/usr/include/x86_64-linux-gnu', '/usr/include']
def include_stub(state, position, name, local):
if local:
includes = [os.path.dirname(state.include_chain[-1])] + local_includes
else:
includes = global_includes
filename = name
for dirname in includes:
path = os.path.join(dirname, name)
if os.path.exists(path):
filename = path
break
else:
            return [] # incorrect, but let's fix that properly later.
if filename in state.include_chain:
print "{1}: {0}: cyclic include: ".format(*position) + filename
return [] # incorrect again
with open(filename, 'r') as fd:
contents = fd.read()
stream = CharacterStream(trigraphs.translate(contents), 1, filename)
return chomp(state.fork(stream, filename))
def advance_position((line0, file0), (line1, file1)):
if file0 == file1 and line1 - 9 < line0 < line1:
sys.stdout.write("\n" * (line1 - line0))
else:
sys.stdout.write("\n#line {!r} \"{!s}\"\n".format(line1, file1))
return (line1, file1)
def main():
import sys
env = {}
if len(sys.argv) < 2:
filename = "/usr/include/stdio.h"
else:
filename = sys.argv[1]
with open(filename, 'r') as fd:
contents = fd.read()
stream = CharacterStream(trigraphs.translate(contents), 1, filename)
state = PreprocessorState(stream, env,
include_chain=(filename,),
include=include_stub)
try:
position = (1, filename)
for token in chomp(state):
if position_of(token) != position:
position = advance_position(position, position_of(token))
# Token generation. This is still wrong
if name_of(token) == 'string':
sys.stdout.write('"' + value_of(token) + '" ')
elif name_of(token) == 'char':
sys.stdout.write("'" + value_of(token) + "' ")
else:
sys.stdout.write(value_of(token) + ' ')
sys.stdout.write('\n')
except AssertionError as ass:
traceback.print_exc()
for x in xrange(5):
print tokenize.chop(stream)
main()
| 37.561983
| 189
| 0.60374
| 9,242
| 0.406689
| 1,286
| 0.05659
| 0
| 0
| 0
| 0
| 3,001
| 0.132057
|
dd8f9e880d1c5b15888f038a47c041322592d1b0
| 2,177
|
py
|
Python
|
arfit/run_carma_pack.py
|
farr/arfit
|
7ff6def331ef98f43f623da2d9867d1ac967448b
|
[
"MIT"
] | 5
|
2015-04-29T21:46:52.000Z
|
2021-05-13T04:59:23.000Z
|
arfit/run_carma_pack.py
|
afcarl/arfit
|
7ff6def331ef98f43f623da2d9867d1ac967448b
|
[
"MIT"
] | null | null | null |
arfit/run_carma_pack.py
|
afcarl/arfit
|
7ff6def331ef98f43f623da2d9867d1ac967448b
|
[
"MIT"
] | 2
|
2015-12-03T12:08:32.000Z
|
2018-05-26T16:20:31.000Z
|
#!/usr/bin/env python
from __future__ import print_function
import argparse
import carmcmc as cm
import numpy as np
import os
import plotutils.autocorr as ac
import sys
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--input', required=True, metavar='FILE', help='input file')
parser.add_argument('--output', required=True, metavar='FILE', help='chain output')
parser.add_argument('--p', default=3, type=int, metavar='P', help='AR order (default: %(default)s)')
parser.add_argument('--q', default=2, type=int, metavar='Q', help='MA order (default: %(default)s)')
parser.add_argument('--neff', default=1000, type=int, metavar='N', help='number of independent samples (default: %(default)s)')
parser.add_argument('--tmax', default=100.0, type=float, metavar='T', help='maximum temperature')
parser.add_argument('--ntemp', default=10, type=int, metavar='N', help='number of temperatures')
args = parser.parse_args()
data = np.loadtxt(args.input)
times, tind = np.unique(data[:,0], return_index=True)
data = data[tind, :]
model = cm.CarmaModel(data[:,0], data[:,1], data[:,2], p=args.p, q=args.q)
thin = 1
nsamp = 10*args.neff
out, ext = os.path.splitext(args.output)
outtemp = out + '.TEMP' + ext
while True:
sample = model.run_mcmc(nsamp, nthin=thin, nburnin=thin*nsamp/2, tmax=args.tmax, ntemperatures=args.ntemp)
np.savetxt(outtemp, np.column_stack((sample.trace, sample.get_samples('loglik'), sample.get_samples('logpost'))))
os.rename(outtemp, args.output)
taus = []
for j in range(sample.trace.shape[1]):
taus.append(ac.autocorrelation_length_estimate(sample.trace[:,j]))
taus = np.array(taus)
if np.any(np.isnan(taus)):
neff_achieved = 0
else:
neff_achieved = sample.trace.shape[0] / np.max(taus)
print('Ran for ', nsamp*thin, ' steps, achieved ', neff_achieved, ' independent samples')
sys.__stdout__.flush()
if neff_achieved >= args.neff:
break
else:
thin *= 2
| 34.015625
| 131
| 0.635278
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 378
| 0.173633
|
dd90daf3764122311fdf88342d48e04ecb1b9e7d
| 3,946
|
py
|
Python
|
editquality/feature_lists/wikitext.py
|
paulkernfeld/editquality
|
029f21278d89d6e50b0eac7b39d8355f8e4686f4
|
[
"MIT"
] | 18
|
2015-09-13T10:47:31.000Z
|
2018-08-20T15:00:35.000Z
|
editquality/feature_lists/wikitext.py
|
paulkernfeld/editquality
|
029f21278d89d6e50b0eac7b39d8355f8e4686f4
|
[
"MIT"
] | 98
|
2015-12-13T12:18:24.000Z
|
2018-08-07T21:10:46.000Z
|
editquality/feature_lists/wikitext.py
|
paulkernfeld/editquality
|
029f21278d89d6e50b0eac7b39d8355f8e4686f4
|
[
"MIT"
] | 17
|
2015-09-29T20:52:12.000Z
|
2018-08-20T11:33:30.000Z
|
from revscoring.features import Feature, wikitext
from revscoring.features.modifiers import div, log, max, sub
def _process_new_longest(p_longest, r_longest):
if r_longest > p_longest:
return r_longest
else:
return 1
parent = [
log(wikitext.revision.parent.chars + 1),
log(wikitext.revision.parent.tokens + 1),
log(wikitext.revision.parent.words + 1),
log(wikitext.revision.parent.uppercase_words + 1),
log(wikitext.revision.parent.headings + 1),
log(wikitext.revision.parent.wikilinks + 1),
log(wikitext.revision.parent.external_links + 1),
log(wikitext.revision.parent.templates + 1),
log(wikitext.revision.parent.ref_tags + 1),
div(wikitext.revision.parent.chars,
max(wikitext.revision.parent.words, 1),
name="revision.parent.chars_per_word"),
div(wikitext.revision.parent.words,
max(wikitext.revision.parent.tokens, 1),
name="revision.parent.words_per_token"),
div(wikitext.revision.parent.uppercase_words,
max(wikitext.revision.parent.words, 1),
name="revision.parent.uppercase_words_per_word"),
div(wikitext.revision.parent.markups,
max(wikitext.revision.parent.tokens, 1),
name="revision.parent.markups_per_token"),
]
diff = [
wikitext.revision.diff.markup_delta_sum,
wikitext.revision.diff.markup_delta_increase,
wikitext.revision.diff.markup_delta_decrease,
wikitext.revision.diff.markup_prop_delta_sum,
wikitext.revision.diff.markup_prop_delta_increase,
wikitext.revision.diff.markup_prop_delta_decrease,
wikitext.revision.diff.number_delta_sum,
wikitext.revision.diff.number_delta_increase,
wikitext.revision.diff.number_delta_decrease,
wikitext.revision.diff.number_prop_delta_sum,
wikitext.revision.diff.number_prop_delta_increase,
wikitext.revision.diff.number_prop_delta_decrease,
wikitext.revision.diff.uppercase_word_delta_sum,
wikitext.revision.diff.uppercase_word_delta_increase,
wikitext.revision.diff.uppercase_word_delta_decrease,
wikitext.revision.diff.uppercase_word_prop_delta_sum,
wikitext.revision.diff.uppercase_word_prop_delta_increase,
wikitext.revision.diff.uppercase_word_prop_delta_decrease,
sub(wikitext.revision.chars,
wikitext.revision.parent.chars,
name="revision.diff.chars_change"),
sub(wikitext.revision.tokens,
wikitext.revision.parent.tokens,
name="revision.diff.tokens_change"),
sub(wikitext.revision.words,
wikitext.revision.parent.words,
name="revision.diff.words_change"),
sub(wikitext.revision.markups,
wikitext.revision.parent.markups,
name="revision.diff.markups_change"),
sub(wikitext.revision.headings,
wikitext.revision.parent.headings,
name="revision.diff.headings_change"),
sub(wikitext.revision.external_links,
wikitext.revision.parent.external_links,
name="revision.diff.external_links_change"),
sub(wikitext.revision.wikilinks,
wikitext.revision.parent.wikilinks,
name="revision.diff.wikilinks_change"),
sub(wikitext.revision.templates,
wikitext.revision.parent.templates,
name="revision.diff.templates_change"),
sub(wikitext.revision.tags,
wikitext.revision.parent.tags,
name="revision.diff.tags_change"),
sub(wikitext.revision.ref_tags,
wikitext.revision.parent.ref_tags,
name="revision.diff.ref_tags_change"),
Feature("revision.diff.longest_new_token",
_process_new_longest, returns=int,
depends_on=[wikitext.revision.parent.longest_token,
wikitext.revision.longest_token]),
Feature("revision.diff.longest_new_repeated_char",
_process_new_longest, returns=int,
depends_on=[wikitext.revision.parent.longest_repeated_char,
wikitext.revision.longest_repeated_char])
]
| 41.978723
| 71
| 0.727826
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 521
| 0.132032
|
dd9402b8557bc8fee0baeb9f728d3c332668ae1e
| 2,240
|
py
|
Python
|
test/http2_test/http2_server_health_check.py
|
miyachu/grpc
|
a06ea3c3162c10ff90a1578bf82bbbff95dc799d
|
[
"BSD-3-Clause"
] | 2
|
2021-09-10T00:20:13.000Z
|
2021-11-16T11:27:19.000Z
|
test/http2_test/http2_server_health_check.py
|
miyachu/grpc
|
a06ea3c3162c10ff90a1578bf82bbbff95dc799d
|
[
"BSD-3-Clause"
] | null | null | null |
test/http2_test/http2_server_health_check.py
|
miyachu/grpc
|
a06ea3c3162c10ff90a1578bf82bbbff95dc799d
|
[
"BSD-3-Clause"
] | 1
|
2020-11-04T04:19:45.000Z
|
2020-11-04T04:19:45.000Z
|
# Copyright 2017, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
import hyper
import sys
# Utility to healthcheck the http2 server. Used when starting the server to
# verify that the server is live before tests begin.
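# Example invocation (an assumed command line, not part of the original script):
#   python http2_server_health_check.py --server_host localhost --server_port 8080
# The process exits with status 0 when the response carries a grpc-encoding
# header and with status 1 otherwise, so a startup script can poll it in a loop.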
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--server_host', type=str, default='localhost')
parser.add_argument('--server_port', type=int, default=8080)
args = parser.parse_args()
server_host = args.server_host
server_port = args.server_port
conn = hyper.HTTP20Connection('%s:%d' % (server_host, server_port))
conn.request('POST', '/grpc.testing.TestService/UnaryCall')
resp = conn.get_response()
if resp.headers.get('grpc-encoding') is None:
sys.exit(1)
else:
sys.exit(0)
| 44.8
| 75
| 0.766964
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,743
| 0.778125
|
dd9452c189452f40fb4e6f56c43cb761ffc48203
| 3,494
|
py
|
Python
|
server/droidio/demands/test/test_views.py
|
lucasOlivio/droid.io
|
945b1452eaaa73b4d7f9d1d1a35eaa2900e97e96
|
[
"MIT"
] | null | null | null |
server/droidio/demands/test/test_views.py
|
lucasOlivio/droid.io
|
945b1452eaaa73b4d7f9d1d1a35eaa2900e97e96
|
[
"MIT"
] | null | null | null |
server/droidio/demands/test/test_views.py
|
lucasOlivio/droid.io
|
945b1452eaaa73b4d7f9d1d1a35eaa2900e97e96
|
[
"MIT"
] | null | null | null |
from django.urls import reverse
from rest_framework.test import APITestCase
from rest_framework import status
from nose.tools import eq_
from faker import Faker
import factory
from ..models import Demand
from .factories import DemandFactory
from ..serializers import DemandSerializer
from droidio.users.test.factories import UserFactory
fake = Faker()
class TestDemandListTestCase(APITestCase):
""" Tests /demands list operations.
"""
def setUp(self):
self.user = UserFactory()
self.client.force_authenticate(user=self.user)
self.url = reverse("demands-list")
self.demand_data = factory.build(dict, FACTORY_CLASS=DemandFactory)
def test_post_request_with_no_data_fails(self):
response = self.client.post(self.url, {})
eq_(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_post_request_with_valid_data_succeeds(self):
response = self.client.post(self.url, self.demand_data)
eq_(response.status_code, status.HTTP_201_CREATED)
demand = Demand.objects.get(pk=response.data.get("id"))
eq_(demand.description, self.demand_data.get("description"))
def test_get_list_returns_only_my_demands(self):
# Set testing demands
DemandFactory(user_created=self.user)
user2 = UserFactory()
DemandFactory(user_created=user2)
# Test response and results
response = self.client.get(self.url)
eq_(response.status_code, status.HTTP_200_OK)
demands = Demand.objects.filter(user_created=self.user)
serializer = DemandSerializer(demands, many=True)
eq_(response.data["count"], 1)
eq_(response.data["results"], serializer.data)
class TestDemandDetailTestCase(APITestCase):
""" Tests /demands detail operations.
"""
def setUp(self):
self.user = UserFactory()
self.client.force_authenticate(user=self.user)
self.demand = DemandFactory(user_created=self.user)
self.url = reverse("demands-detail", kwargs={"pk": self.demand.pk})
def test_get_request_returns_a_given_demand(self):
response = self.client.get(self.url)
eq_(response.status_code, status.HTTP_200_OK)
def test_patch_request_updates_a_demand(self):
new_description = fake.text()
payload = {"description": new_description}
response = self.client.patch(self.url, payload)
eq_(response.status_code, status.HTTP_200_OK)
demand = Demand.objects.get(pk=self.demand.id)
eq_(demand.description, new_description)
def test_put_request_updates_a_demand(self):
payload = factory.build(dict, FACTORY_CLASS=DemandFactory)
response = self.client.put(self.url, payload)
eq_(response.status_code, status.HTTP_200_OK)
demand = Demand.objects.get(pk=self.demand.id)
eq_(demand.description, payload["description"])
def test_set_demand_completed(self):
custom_action = reverse("demands-set-completed", kwargs={"pk": self.demand.pk})
response = self.client.post(custom_action)
eq_(response.status_code, status.HTTP_200_OK)
demand = Demand.objects.get(pk=self.demand.id)
eq_(demand.is_completed, True)
def test_delete_request_deletes_a_demand(self):
response = self.client.delete(self.url)
eq_(response.status_code, status.HTTP_204_NO_CONTENT)
demand = Demand.objects.filter(pk=self.demand.id).first()
eq_(demand, None)
| 34.94
| 87
| 0.704637
| 3,131
| 0.896108
| 0
| 0
| 0
| 0
| 0
| 0
| 256
| 0.073268
|
dd94f0230de4472e8494e2e5c028fe0a163fe4d9
| 422
|
py
|
Python
|
leetcode/python/check_in_n_and_its_double_exists.py
|
subhadig/leetcode
|
9151ea49c342efa228cf82de72736c3445bbfef2
|
[
"Unlicense"
] | null | null | null |
leetcode/python/check_in_n_and_its_double_exists.py
|
subhadig/leetcode
|
9151ea49c342efa228cf82de72736c3445bbfef2
|
[
"Unlicense"
] | null | null | null |
leetcode/python/check_in_n_and_its_double_exists.py
|
subhadig/leetcode
|
9151ea49c342efa228cf82de72736c3445bbfef2
|
[
"Unlicense"
] | null | null | null |
# https://leetcode.com/explore/learn/card/fun-with-arrays/527/searching-for-items-in-an-array/3250/
# time: O(n)
# space: O(n)
from typing import List  # LeetCode provides List implicitly; needed for standalone runs

class Solution:
def checkIfExist(self, arr: List[int]) -> bool:
if not arr:
return False
nums = set()
for x in arr:
if 2*x in nums or x/2 in nums:
return True
else:
nums.add(x)
return False
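# A minimal usage sketch (editorial addition, not part of the submitted solution):
if __name__ == '__main__':
    s = Solution()
    print(s.checkIfExist([10, 2, 5, 3]))   # True: 5 * 2 == 10
    print(s.checkIfExist([3, 1, 7, 11]))   # False: no element is double another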
| 26.375
| 99
| 0.533175
| 293
| 0.694313
| 0
| 0
| 0
| 0
| 0
| 0
| 124
| 0.293839
|
dd95ba5b789b57d2c18cb6c697a4bed1400af969
| 2,743
|
py
|
Python
|
cloud_functions/trigger-monitor-dag-function/main_test.py
|
google/feedloader
|
f6a25569bc3d7d4ee326961fd3b01e45fc3858e4
|
[
"Apache-2.0"
] | 5
|
2021-02-15T12:49:12.000Z
|
2022-01-12T06:28:41.000Z
|
cloud_functions/trigger-monitor-dag-function/main_test.py
|
google/feedloader
|
f6a25569bc3d7d4ee326961fd3b01e45fc3858e4
|
[
"Apache-2.0"
] | null | null | null |
cloud_functions/trigger-monitor-dag-function/main_test.py
|
google/feedloader
|
f6a25569bc3d7d4ee326961fd3b01e45fc3858e4
|
[
"Apache-2.0"
] | 4
|
2021-02-16T17:28:00.000Z
|
2021-06-18T15:27:52.000Z
|
# coding=utf-8
# Copyright 2021 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the Trigger DAG Cloud Function."""
import os
from unittest import mock
from absl.testing import parameterized
import main
_TEST_CLIENT_ID = '12345.apps.googleusercontent.com'
_TEST_DAG_NAME = 'dag-name'
_TEST_WEBSERVER_ID = 'https://12345-tp.appspot.com'
@mock.patch.dict(
os.environ, {
'CLIENT_ID': _TEST_CLIENT_ID,
'DAG_NAME': _TEST_DAG_NAME,
'WEBSERVER_ID': _TEST_WEBSERVER_ID,
})
class TriggerMonitorDagFunctionTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self.event = {
'bucket': 'feed-bucket',
'name': 'filename',
'metageneration': 'test-metageneration',
'timeCreated': '0',
'updated': '0'
}
self.context = mock.create_autospec('google.cloud.functions.Context')
self.context.event_id = '12345'
self.context.event_type = 'gcs-event'
self.context.timestamp = '2021-06-05T08:16:15.183Z'
@mock.patch.object(
main,
'make_iap_request',
side_effect=Exception('Bad request: JSON body error'))
def test_json_body_error(self, _):
trigger_event = None
with self.assertRaises(Exception) as context:
main.trigger_dag(trigger_event, self.context)
self.assertIn('Bad request: JSON body error', str(context.exception))
@mock.patch.object(
main,
'make_iap_request',
side_effect=Exception('Error in IAP response: unauthorized'))
def test_iap_response_error(self, _):
trigger_event = {'file': 'some-gcs-file'}
with self.assertRaises(Exception) as context:
main.trigger_dag(trigger_event, self.context)
self.assertIn('Error in IAP response', str(context.exception))
@mock.patch.object(main, 'make_iap_request', autospec=True)
def test_api_endpoint(self, make_iap_request_mock):
main.trigger_dag(self.event, self.context)
make_iap_request_mock.assert_called_once_with(
'https://12345-tp.appspot.com/api/experimental/dags/dag-name/dag_runs',
'12345.apps.googleusercontent.com',
method='POST',
json={
'conf': self.event,
'replace_microseconds': 'false'
},
)
| 31.528736
| 79
| 0.696318
| 1,712
| 0.624134
| 0
| 0
| 1,873
| 0.682829
| 0
| 0
| 1,255
| 0.457528
|