blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
616
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
69
| license_type
stringclasses 2
values | repo_name
stringlengths 5
118
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
63
| visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 2.91k
686M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 23
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 213
values | src_encoding
stringclasses 30
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 2
10.3M
| extension
stringclasses 246
values | content
stringlengths 2
10.3M
| authors
listlengths 1
1
| author_id
stringlengths 0
212
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
df03c92bcf943088682c7ef45eb2bcc34e478d71
|
9f1039075cc611198a988034429afed6ec6d7408
|
/tensorflow-stubs/contrib/distribute/python/one_device_strategy.pyi
|
5c91f768c011f01a98b97b3846eb1ca4d2acf93e
|
[] |
no_license
|
matangover/tensorflow-stubs
|
9422fbb1cb3a3638958d621461291c315f9c6ec2
|
664bd995ef24f05ba2b3867d979d23ee845cb652
|
refs/heads/master
| 2020-05-23T12:03:40.996675
| 2019-05-15T06:21:43
| 2019-05-15T06:21:43
| 186,748,093
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,375
|
pyi
|
# Stubs for tensorflow.contrib.distribute.python.one_device_strategy (Python 3)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
from tensorflow.contrib.distribute.python import values as values
from tensorflow.python.framework import constant_op as constant_op, ops as ops
from tensorflow.python.ops import array_ops as array_ops, control_flow_ops as control_flow_ops, math_ops as math_ops
from tensorflow.python.training import distribute as distribute_lib
from tensorflow.python.util import nest as nest
from typing import Any as Any, Optional as Optional
class OneDeviceStrategy(distribute_lib.DistributionStrategy):
    # Type-stub signatures for the single-device DistributionStrategy.
    # Generated by stubgen; bodies are intentionally elided (`...`).
    def __init__(self, device: Any, prefetch_on_device: Optional[Any] = ...) -> None: ...
    def distribute_dataset(self, dataset_fn: Any): ...
    def map(self, map_over: Any, fn: Any, *args: Any, **kwargs: Any): ...
    def read_var(self, tower_local_var: Any): ...
    def value_container(self, value: Any): ...
    @property
    def is_single_tower(self): ...
    @property
    def num_towers(self): ...
    @property
    def worker_devices(self): ...
    @property
    def parameter_devices(self): ...
    def non_slot_devices(self, var_list: Any): ...
class _OneDeviceTowerContext(distribute_lib.TowerContext):
    # Type-stub for the tower context used by OneDeviceStrategy (stubgen output).
    def __init__(self, distribution_strategy: Any) -> None: ...
    @property
    def device(self): ...
|
[
"matangover@gmail.com"
] |
matangover@gmail.com
|
4e53a630899bd08654d57a6180f85aaf4deb47f9
|
1c22f63512c6b62e431f01e909778ffa51a50d1b
|
/Sentiment/Model/sentiment.py
|
f544f1bc0e5d292dde2a30994396fea694419008
|
[] |
no_license
|
Alex-Mathai-98/Sarcasm-Detection-in-Product-Reviews-Using-Deep-Learning
|
4550deda84ac35709f064ee9ef8a7410c238edd2
|
339079d18cbb00b85c43637a095e58da2c1e109a
|
refs/heads/master
| 2020-03-21T15:04:28.107078
| 2018-06-27T19:36:14
| 2018-06-27T19:36:14
| 138,692,928
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,119
|
py
|
import numpy as np
from nltk import sent_tokenize
import json, requests
# java -mx4g -cp "*" edu.stanford.nlp.pipeline.StanfordCoreNLPServer -port 9000 -timeout 15000000
class StanfordCoreNLP:
    """
    Thin HTTP client for a running Stanford CoreNLP server.

    Modified from https://github.com/smilli/py-corenlp
    """
    def __init__(self, server_url):
        """server_url: base URL of the CoreNLP server, e.g. http://127.0.0.1:9000."""
        # Normalize away a single trailing slash. Using endswith() also
        # avoids the IndexError the original raised on an empty string.
        if server_url.endswith('/'):
            server_url = server_url[:-1]
        self.server_url = server_url

    def annotate(self, text, properties=None):
        """POST *text* to the CoreNLP server and return the annotation result.

        Returns the raw response text, or a parsed dict when *properties*
        requests 'outputFormat': 'json'. Raises Exception with setup
        instructions when the server is unreachable.
        """
        assert isinstance(text, str)
        if properties is None:
            properties = {}
        else:
            assert isinstance(properties, dict)
        # Checks that the Stanford CoreNLP server is started.
        try:
            requests.get(self.server_url)
        except requests.exceptions.ConnectionError:
            raise Exception('Check whether you have started the CoreNLP server e.g.\n'
                            '$ cd <path_to_core_nlp_folder>/stanford-corenlp-full-2016-10-31/ \n'
                            '$ java -mx4g -cp "*" edu.stanford.nlp.pipeline.StanfordCoreNLPServer -port <port> -timeout <timeout_in_ms>')
        data = text.encode()
        r = requests.post(
            self.server_url, params={
                'properties': str(properties)
            }, data=data, headers={'Connection': 'close'})
        output = r.text
        if ('outputFormat' in properties
                and properties['outputFormat'] == 'json'):
            try:
                # Fixed: json.loads() no longer accepts an `encoding` kwarg
                # (deprecated since 3.1, removed in Python 3.9); r.text is
                # already a decoded str so no encoding is needed.
                output = json.loads(output, strict=True)
            except json.JSONDecodeError:
                # Best-effort fallback to the raw text on malformed JSON,
                # preserving the original behavior without the bare except
                # that swallowed every error type.
                pass
        return output
class sentiment_classifier() :
    """Paragraph-level sentiment scoring backed by a Stanford CoreNLP server."""

    def __init__ (self,text) :
        # Default text this classifier instance operates on.
        self.text = text

    def sentiment_analysis_on_sentence(self,sentence):
        """Annotate *sentence* via CoreNLP and return the 'sentences' list.

        Each entry has 'sentiment' and 'sentimentValue' attributes where
        "Very negative" = 0, "Negative" = 1, "Neutral" = 2, "Positive" = 3,
        "Very positive" = 4.
        """
        # The StanfordCoreNLP server is expected at http://127.0.0.1:9000
        nlp = StanfordCoreNLP('http://127.0.0.1:9000')
        # JSON response of all the annotations
        output = nlp.annotate(sentence, properties={
            "annotators": "tokenize,ssplit,parse,sentiment",
            "outputFormat": "json",
            # Setting enforceRequirements to skip some annotators and make the process faster
            "enforceRequirements": "false"
        })
        assert isinstance(output['sentences'], list)
        return output['sentences']

    def sentence_sentiment(self,sentence):
        """Return a 1 x n int64 vector of sentiment values, one per CoreNLP sentence."""
        # checking if the sentence is of type string
        assert isinstance(sentence, str)
        # getting the json output of the different sentences. Type "List"
        result = self.sentiment_analysis_on_sentence(sentence)
        num_of_sentences = len(result)
        sentiment_vec = np.zeros((1,num_of_sentences), dtype = "int64" )
        for i in range(0,num_of_sentences):
            sentiment_vec[0,i] = int(result[i]['sentimentValue'])
        return sentiment_vec

    def paragraph_sentiment(self,text):
        """Score *text* sentence by sentence; multi-sentence splits are averaged.

        Fixed: the *text* parameter was previously ignored and self.text was
        always tokenized instead, so callers passing a different paragraph
        silently got the constructor text's scores.
        """
        sents = sent_tokenize(text)
        final_vector = []
        for sent in sents :
            vec = self.sentence_sentiment(sent)[0]
            if len(vec) > 1 :
                # CoreNLP re-split this sentence; average its sub-scores.
                final_vector.append(vec.mean())
            else :
                final_vector.append(vec[0])
        return final_vector

    def display_value_meanings(self):
        """Print the mapping from sentiment value (0-4) to its label."""
        sentiment_meaning = {'0':'Very Negative','1': 'Negative','2':'Normal','3':'Good','4':'Very Good'}
        for i in range(len(sentiment_meaning)):
            print("{} stands for {}".format(str(i),sentiment_meaning[str(i)]))
if __name__ == '__main__':
    # Demo driver: score a hard-coded review paragraph.
    text = "You are stupid! You're smart and handsome. This is a tool. Rohan is a fantastic person and a great person!"
    # NOTE(review): the assignment below immediately overwrites the text above,
    # so the first assignment is dead code — presumably leftover from testing.
    text = "I think she makes some good points, and I think some things are just put out there and that she doesn't listen. She just wants only her opinion to be right to an extreme. She's good at analyzing situations, but she would not be good for a government position requiring much trust to keep stability, that is for sure. On the other hand, you probably want her to be your Republican lobbyist. A \"friend\" a \"Coulter Jr.\" told me about how great this book is. He acts just like Coulter, but just doesn't publish books and goes out and speaks like she does. Otherwise, he would probably be doing at least okay- (Coulter created and kept her niche first.) I am not particularly Democrat or Republican, but I try to give everything a chance. This book, while giving some fresh perspectives I would not have thought of, is quite hit or miss, too opinionated, and not always reasoning things out enough."
    senti = sentiment_classifier(text)
    senti.display_value_meanings()
    # Requires a local CoreNLP server on port 9000 (see comment at top of file).
    vector = senti.paragraph_sentiment(text)
    print(vector)
|
[
"noreply@github.com"
] |
Alex-Mathai-98.noreply@github.com
|
ae159a9c1c63ed349e494c12b82634191cd28e0a
|
1950ade9ea5c8b43762125ce335a2d7a32cac32c
|
/Kafka-json-serde/producer/producer.py
|
5e125028cdfcdbfe5144f3441f9b64f8eedf5d9a
|
[
"MIT"
] |
permissive
|
mydevground/ApacheKafkaPython
|
a5cfd3ea9cc09b6e311bd88cc5a5f8164b743680
|
96da0e5d0294a9823776f9622fca6a3b46ff06bb
|
refs/heads/main
| 2023-08-17T08:05:52.526064
| 2021-09-26T06:26:15
| 2021-09-26T06:26:15
| 410,462,259
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,498
|
py
|
from uuid import uuid4
from confluent_kafka import SerializingProducer
from confluent_kafka.schema_registry import SchemaRegistryClient
from confluent_kafka.schema_registry.json_schema import JSONSerializer
from confluent_kafka.serialization import StringSerializer
from model.user import User
from utils.config import KAFKA_CONFIGS
from utils.config import LOG
def user_to_dict(user, ctx):
    """Flatten a User object into a plain dict for JSON serialization.

    *ctx* is the SerializationContext supplied by confluent-kafka; it is
    accepted for the callback signature but not used here.
    """
    return {
        "first_name": user.first_name,
        "last_name": user.last_name,
        "email": user.email,
        "age": user.age,
    }
def delivery_report(err, msg):
    """Kafka delivery callback: log the outcome of a produce() call."""
    if err is None:
        # Successful delivery: record topic/partition/offset for traceability.
        LOG.info("User record {} successfully produced to {} [{}] at offset {}".format(
            msg.key(), msg.topic(), msg.partition(), msg.offset()))
    else:
        LOG.error("Delivery failed for User record {}: {}".format(msg.key(), err))
def main():
    """Interactively read user records from stdin and produce them to Kafka as JSON."""
    # JSON schema the serializer validates outgoing records against.
    with open("../resources/schema/user.json") as fp:
        schema_str = fp.read()
    schema_registry_conf = {'url': KAFKA_CONFIGS['producer']['schema-registry']}
    schema_registry_client = SchemaRegistryClient(schema_registry_conf)
    # Serializes User objects via user_to_dict, validated against the schema.
    json_serializer = JSONSerializer(schema_str=schema_str, schema_registry_client=schema_registry_client,
                                     to_dict=json_serializer_to_dict) if False else JSONSerializer(schema_str=schema_str, schema_registry_client=schema_registry_client,
                                     to_dict=user_to_dict)
    producer_conf = {'bootstrap.servers': KAFKA_CONFIGS['producer']['bootstrap-servers'],
                     'key.serializer': StringSerializer('utf_8'),
                     'value.serializer': json_serializer}
    producer = SerializingProducer(producer_conf)
    topic = KAFKA_CONFIGS['producer']['topic']
    LOG.info("Producing user records to topic {}. ^C to exit.".format(topic))
    while True:
        try:
            user_first_name = input("Enter first name: ")
            user_last_name = input("Enter last name: ")
            user_email = input("Enter email: ")
            user_age = int(input("Enter age: "))
            user = User(first_name=user_first_name,
                        last_name=user_last_name,
                        email=user_email,
                        age=user_age)
            # Key is a random UUID; the value serializer handles the User object.
            producer.produce(topic=topic, key=str(uuid4()), value=user,
                             on_delivery=delivery_report)
        except KeyboardInterrupt:
            break
        except ValueError:
            LOG.error("Invalid input, discarding record...")
            continue
        # Serve delivery callbacks from earlier produce() calls.
        # NOTE(review): skipped when ValueError hits `continue` above — confirm
        # placement (the upstream confluent example polls at the top of the loop).
        producer.poll(0.0)
    LOG.info("\nFlushing records...")
    producer.flush()

if __name__ == '__main__':
    main()
|
[
"noreply@github.com"
] |
mydevground.noreply@github.com
|
2a0ecdeb2210d0fd6692bd55462c96df7219bc49
|
97a001f06ff21dd3a5370aa68a603bab5bcfc0cc
|
/I18NSystem/provisionadmin-service0/provisionadmin/db/user.py
|
f2d39a5d3e8fd1217e04870395030a57afbda7d8
|
[] |
no_license
|
vincentchivas/authdesign
|
4bd7e5122cfd686c953bc2ee1d653e65838f5fbb
|
a4e471930615dba010416ee314c73bdccc150585
|
refs/heads/master
| 2021-01-25T08:49:20.848617
| 2014-07-12T09:09:36
| 2014-07-12T09:09:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 312
|
py
|
from provisionadmin.db.config import USER_DB
from provisionadmin.model.user import User
from provisionadmin.db.seqid import get_next_id
def save_user(user):
    """Persist *user* into USER_DB, assigning a sequential id when missing.

    Returns the (possibly id-updated) user object.
    """
    assert user
    needs_id = not user._id
    if needs_id:
        # New document: take the next value from the 'user' id sequence.
        user._id = get_next_id('user')
    USER_DB.user.save(user)
    return user
|
[
"vincentchivas1989@gmail.com"
] |
vincentchivas1989@gmail.com
|
82f581965a073f92160c11b4648ddcdaa19e8699
|
f14c815cd34f4a64beec88bf79c03a216827ce1e
|
/aboutme/mainapp/migrations/0005_auto_20161220_1209.py
|
86dd510b9b7ced2188cb92e75f31ae96bf59e9e0
|
[] |
no_license
|
malmax/django-lessons
|
ae4206c9bd9f56a4f3a2ee07d7688015ac0736df
|
7b0c4c1c697ec58a685b1e69b795023a976c6b4b
|
refs/heads/master
| 2020-06-12T20:06:57.930445
| 2017-06-24T21:18:56
| 2017-06-24T21:18:56
| 75,756,792
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 469
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2016-12-20 09:09
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: alter 'organization.region' to a
    # CharField(max_length=50) that allows blank values.
    dependencies = [
        ('mainapp', '0004_auto_20161220_1203'),
    ]
    operations = [
        migrations.AlterField(
            model_name='organization',
            name='region',
            field=models.CharField(blank=True, max_length=50),
        ),
    ]
|
[
"malmax.spb@gmail.com"
] |
malmax.spb@gmail.com
|
6066f7edce7a8f90fc3fe6ca88de13ea77e332f1
|
2c61671ad98b9336bd6ef2801d8545fb111626fd
|
/search_core/phash.py
|
ed246b1ce51938b95a092dd7a7a8ce5e1515c40c
|
[] |
no_license
|
FelixFox/phash-implementation
|
d8d79a4ccd85bc26c35e62dcb8d3c63bb2f13f31
|
224205f4f74e1059b192c89c6a3c5d14132aeb69
|
refs/heads/master
| 2020-05-22T16:07:29.244790
| 2019-06-13T17:09:36
| 2019-06-13T17:09:36
| 186,423,395
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,185
|
py
|
"""
Author: Anastasia Kriuchkovska
E-mail: staceyhooks0@gmail.com
This is an implementation of PHash algorithm for computing image hash.
The algorithm has next steps:
1. Reducing size and color of a given image to 32x32 resolution.This is done to simplify the DCT calculation and other computations.
2. Applying Discrete Cosine Transform (DCT). Like in JPEG, here DCT separates the image into frequencies and scalars
3. Reducing DCT result. Making it from 32x32 to 8x8 by keeping top-left 8x8 square. This square is a representation of the lowest frequencies of the image.
4. Computing the median value from 8x8 DCT result
5. Getting binary hash. Here we set the 64 hash bits to 0 or 1 depending on whether each of the 64 DCT values is above or below the median value.
The result represents rough relative scale of the frequencies to the median value. If the structure of the image changes a bit, the result will not vary
"""
import math
from PIL import Image
import numpy as np
class PHash:
    """Perceptual hash (pHash) of an image: DCT of a downscaled grayscale copy,
    thresholded at the median of the low-frequency block."""

    def __init__(self, image_size=32, hash_size=8):
        """Inits object for applying pHash algo.
        Parameters
        ----------
        image_size : int, optional
            size of image after reduction of the given one, by default 32
        hash_size : int, optional
            determines length of image hash (actual hash size is ` self.hash_size**2 ` ), by default 8
        """
        self.size = image_size
        self.hash_size = hash_size
        self.coefs = self.__init_coefs()

    def __init_coefs(self):
        """Create the DCT-II normalization coefficients, one per row/column.

        Fixed: the coefficient list was previously hard-coded to 32 entries,
        which raised IndexError in __dct for any image_size > 32 and silently
        over-allocated for smaller sizes. It now follows self.size.
        Returns
        -------
        coefs: list
            initial coefficients (coef[0] = 1/sqrt(2), the rest 1)
        """
        coef = [1] * self.size
        coef[0] = 1 / math.sqrt(2)
        return coef

    def get_hash(self, image: "Image") -> np.ndarray:
        """Calculates hash for the given image data.
        Parameters
        ----------
        image : PIL.Image
            image data
        Returns
        -------
        binary_hash_np: np.ndarray
            binary hash (hash_size x hash_size boolean array)
        """
        # NOTE(review): Image.ANTIALIAS was removed in Pillow 10; on modern
        # Pillow use Image.Resampling.LANCZOS instead.
        image_gray = image.convert("L").resize(
            (self.size, self.size), Image.ANTIALIAS)
        pixels = np.asarray(image_gray)
        dct_values = self.__dct(pixels)
        binary_hash_np = self.__get_binary_hash(dct_values)
        return binary_hash_np

    def __dct(self, input_vals: np.ndarray) -> np.ndarray:
        """Applies a naive O(n^4) 2-D Discrete Cosine Transform (DCT-II).
        Parameters
        ----------
        input_vals : np.ndarray
            square (size x size) array of grayscale pixel values
        Returns
        -------
        result: np.ndarray
        """
        res = np.ones(shape=(self.size, self.size))
        for u in range(self.size):
            for v in range(self.size):
                # `acc` renamed from `sum`, which shadowed the builtin.
                acc = 0.0
                for i in range(self.size):
                    for j in range(self.size):
                        acc += self.__compute_dct_part(
                            u, v, i, j, input_vals[i][j])
                acc *= (self.coefs[u] * self.coefs[v]) / 4.0
                res[u][v] = float(acc)
        return res

    def __compute_dct_part(self, u, v, i, j, input_value):
        # One DCT-II basis term: cosine-weighted contribution of pixel (i, j)
        # to output frequency (u, v).
        result = (
            math.cos(((2 * i + 1) / (2.0 * self.size)) * u * math.pi)
            * math.cos(((2 * j + 1) / (2.0 * self.size)) * v * math.pi)
            * input_value
        )
        return result

    def __get_binary_hash(self, dct_result):
        """Reducing DCT result. Making it from image_width x image_height to hash_size x hash_size
        by keeping top-left hash_size x hash_size square.
        This square is a representation of the lowest frequencies of the image.
        Parameters
        ----------
        dct_result : np.ndarray
        Returns
        -------
        binary_hash: np.ndarray
            boolean array, True where the DCT value exceeds the block median
        """
        dct_low = dct_result[:self.hash_size, :self.hash_size]
        med = np.median(dct_low)
        diff = dct_low > med
        return diff
|
[
"a.kryuchkovska@cvrndteam.com"
] |
a.kryuchkovska@cvrndteam.com
|
4d492009405f6df5596c3c4c0deb1ca2ec744a00
|
df23284eec33049dfc4d823ca8f8b707b3295fba
|
/Problem Solving/Sherlock and Squares.py
|
6af512c2a6983e2ba0f2bbb70be5a7ff5b748385
|
[] |
no_license
|
Rishabhjain-1509/HackerrankSolution
|
668e2054ad0e6a293c84703e039f7c341a541881
|
38bbae666dd91dba6a8d7412864306127d630a64
|
refs/heads/master
| 2022-11-12T14:34:01.669220
| 2022-11-09T18:02:36
| 2022-11-09T18:02:36
| 256,114,871
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 618
|
py
|
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the squares function below.
def squares(a, b):
    """Count the perfect squares in the inclusive range [a, b].

    Assumes 1 <= a <= b (the HackerRank constraints). O(1) instead of the
    original O(sqrt(b)) trial loop: the number of squares <= n is isqrt(n),
    so the count in [a, b] is isqrt(b) - isqrt(a - 1). math.isqrt is exact
    for arbitrary-size integers (no float rounding).
    """
    return math.isqrt(b) - math.isqrt(a - 1)
if __name__ == '__main__':
    # HackerRank harness: first stdin line is the query count; each query is
    # "a b"; one result per line is written to the OUTPUT_PATH file.
    # Fixed: a context manager now guarantees the output file is closed even
    # if input parsing raises (the original leaked the handle on error).
    with open(os.environ['OUTPUT_PATH'], 'w') as fptr:
        q = int(input())
        for _ in range(q):
            a, b = map(int, input().split())
            fptr.write(str(squares(a, b)) + '\n')
|
[
"noreply@github.com"
] |
Rishabhjain-1509.noreply@github.com
|
a3c08650b204ab32b825981c0860702bce98423a
|
8e24e8bba2dd476f9fe612226d24891ef81429b7
|
/geeksforgeeks/python/python_all/83_3.py
|
12698b43c4778dfb9ad52c496eddfefd80ad857b
|
[] |
no_license
|
qmnguyenw/python_py4e
|
fb56c6dc91c49149031a11ca52c9037dc80d5dcf
|
84f37412bd43a3b357a17df9ff8811eba16bba6e
|
refs/heads/master
| 2023-06-01T07:58:13.996965
| 2021-06-15T08:39:26
| 2021-06-15T08:39:26
| 349,059,725
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,394
|
py
|
Python | Reverse Interval Slicing String
Sometimes, while working with strings, we need to perform string slicing — here, a variant in which we need to perform reverse slicing over an interval. This kind of task can come up in day-to-day programming. Let's discuss certain ways in which it can be performed.
**Method #1 : Using String Slicing (1)**
This task can be performed using nested string slicing: the first slice performs the interval selection and the second slice performs the reversal.
__
__
__
__
__
__
__
# Python3 code to demonstrate working of
# Reverse Interval Slicing String
# Using String Slicing 1
# initializing string
test_str = "geeksforgeeks"
# printing original string
print("The original string is : " + test_str)
# initializing Interval
K = 2
# Reverse Interval Slicing String
# Using String Slicing 1
res = test_str[::K][::-1]
# printing result
print("The reverse Interval Slice : " + str(res))
---
__
__
**Output :**
The original string is : geeksforgeeks
The reverse Interval Slice : segoseg
**Method #2 : Using String Slicing (2)**
This is another way in which the task can be performed. We employ a similar approach to the one above, but with a different ordering of the slices.
__
__
__
__
__
__
__
# Python3 code to demonstrate working of
# Reverse Interval Slicing String
# Using String Slicing 2
# initializing string
test_str = "geeksforgeeks"
# printing original string
print("The original string is : " + test_str)
# initializing Interval
K = 2
# Reverse Interval Slicing String
# Using String Slicing 1
res = test_str[::-1][::K]
# printing result
print("The reverse Interval Slice : " + str(res))
---
__
__
**Output :**
The original string is : geeksforgeeks
The reverse Interval Slice : segoseg
Attention geek! Strengthen your foundations with the **Python Programming
Foundation** Course and learn the basics.
To begin with, your interview preparations Enhance your Data Structures
concepts with the **Python DS** Course.
My Personal Notes _arrow_drop_up_
Save
|
[
"qmnguyenw@gmail.com"
] |
qmnguyenw@gmail.com
|
c3fcf3efdc21c63082a9599387f5ee9286f23048
|
83f1f89258cb19f49321210e2760f0c0e61962b3
|
/cervicam/settings.py
|
a036d968cc6eb5c17ae74a36c54fd299a13ac669
|
[] |
no_license
|
CerviCam/CaMLER-Main-API
|
c1864336a704b42485c9b86700d02ccbb7a1410d
|
efc60672cc801cea4f56b81b992a363948fd5bf2
|
refs/heads/master
| 2022-12-12T12:28:42.949125
| 2020-09-02T09:14:46
| 2020-09-02T09:14:46
| 279,703,938
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,022
|
py
|
"""
Django settings for cervicam project.
Generated by 'django-admin startproject' using Django 3.0.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
# Load local env
from dotenv import load_dotenv
load_dotenv()
import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to the repository; rotate it and load it
# from the environment before any production deployment.
SECRET_KEY = 've*v62y$*q=n22*xt9*g97wv4&6mk^ipak=*xz@%-x+!l*g(*%'

# SECURITY WARNING: don't run with debug turned on in production!
# Defaults to on (1) when the DEBUG env var is absent.
DEBUG = bool(int(os.getenv('DEBUG', 1)))

# Defaults to '*' (any host) when ALLOWED_HOSTS is unset — tighten in production.
ALLOWED_HOSTS = os.getenv('ALLOWED_HOSTS', '*').split(',')

# Base URLs of the companion services this API talks to.
APIS = {
    'MAIN': {
        'DOMAIN': os.getenv('MAIN_API_DOMAIN', 'http://localhost:8000')
    },
    'AI': {
        'DOMAIN': os.getenv('AI_API_DOMAIN', 'http://localhost:2020')
    }
}

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rest_framework_api_key',
    'rest_framework',
    'rest_framework.authtoken',
    'django_filters',
    'apps.v1.common',
    'apps.v1.user',
    'apps.v1.cervic_model',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'whitenoise.middleware.WhiteNoiseMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'cervicam.urls'

# DRF: token auth, API-key gate, and default filtering/pagination.
REST_FRAMEWORK = {
    'DEFAULT_FILTER_BACKENDS': [
        'django_filters.rest_framework.DjangoFilterBackend',
        'rest_framework.filters.SearchFilter',
        'rest_framework.filters.OrderingFilter',
    ],
    'DEFAULT_PAGINATION_CLASS': 'apps.v1.common.pagination.DefaultLimitOffsetPagination',
    'DEFAULT_AUTHENTICATION_CLASSES': [
        'rest_framework.authentication.TokenAuthentication',
    ],
    'DEFAULT_PERMISSION_CLASSES': [
        'rest_framework_api_key.permissions.HasAPIAccess',
        'rest_framework.permissions.IsAuthenticatedOrReadOnly',
    ]
}

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.template.context_processors.media',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'cervicam.wsgi.application'

# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
# Backend selected via the DBMS env var: POSTGRESQL (env-configured) or
# SQLITE3 (local file, the default).
DBMS = os.getenv('DBMS', 'SQLITE3')
if DBMS == "POSTGRESQL":
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.postgresql_psycopg2',
            'NAME': os.getenv('DATABASE_NAME'),
            'USER': os.getenv('DATABASE_USER'),
            'PASSWORD': os.getenv('DATABASE_PASSWORD'),
            'HOST': os.getenv('DATABASE_HOST'),
            'PORT': os.getenv('DATABASE_PORT'),
        }
    }
elif DBMS == 'SQLITE3':
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.sqlite3',
            'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
        }
    }

# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'Asia/Jakarta'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/

STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static/')

# Serve image or video files
MEDIA_URL = '/media/'
# NOTE(review): os.path.join discards BASE_DIR when the second component is
# absolute, so the default here resolves to the absolute path '/media/' —
# likely intended to be os.path.join(BASE_DIR, 'media'). Confirm against the
# deployed layout before changing.
MEDIA_ROOT = os.getenv('STORAGE_PATH', os.path.join(BASE_DIR, '/media/'))
|
[
"nann011530@gmail.com"
] |
nann011530@gmail.com
|
c590d814b62bb521395703100ef03566fda4202d
|
a7e89bc0436f67e2160905e7d1becd681acc42c1
|
/vueshengxian/settings.py
|
0ce8c80f10148ebc04eeebb879d5e6082a5d80b8
|
[] |
no_license
|
supermanfeng/shengxianproject
|
6cc718a99d17054a959af264aae88c02d75be10b
|
dba59227e918653c5e6b5d4dd892afc4477eccd1
|
refs/heads/master
| 2020-03-10T13:30:47.214241
| 2018-04-26T13:20:34
| 2018-04-26T13:20:34
| 129,401,909
| 1
| 0
| null | 2018-04-27T12:26:08
| 2018-04-13T12:52:08
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 4,679
|
py
|
"""
Django settings for vueshengxian project.
Generated by 'django-admin startproject' using Django 1.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
import sys

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, BASE_DIR)
# These must be added so apps under ./apps and ./extra_apps are importable.
sys.path.insert(0, os.path.join(BASE_DIR, 'apps'))
sys.path.insert(0, os.path.join(BASE_DIR, 'extra_apps'))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): key is committed to the repository; rotate before production.
SECRET_KEY = 'u_7p6#)zf#@s8v%6jsp-&j=p5y%-yl#9(5*j_exxru_jkx7g(p'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# Application definition

# Use the custom user model from the `users` app.
AUTH_USER_MODEL = 'users.UserProfile'

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'apps',
    'users',
    'goods',
    'trade',
    'user_operation',
    'DjangoUeditor',
    'crispy_forms',
    'rest_framework',
    'django_filters',
    'corsheaders',
    'xadmin',
    'rest_framework.authtoken'
]

MIDDLEWARE = [
    # Allow the frontend to make cross-origin (cross-port) requests.
    'corsheaders.middleware.CorsMiddleware',
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

# Allow the frontend to make cross-origin (cross-port) requests.
# NOTE(review): allows ANY origin — acceptable for development only.
CORS_ORIGIN_ALLOW_ALL = True

ROOT_URLCONF = 'vueshengxian.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'vueshengxian.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
# NOTE(review): MySQL credentials are hard-coded in the repo; prefer env vars.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': "shengxian",
        "USER": "root",
        'PASSWORD': '123fcr',
        'HOST': '127.0.0.1',
        'PORT': '3306',
        'OPTIONS': {'init_command': 'SET default_storage_engine=INNODB;'}
    }
}

# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/

LANGUAGE_CODE = 'zh-hans'

TIME_ZONE = 'Asia/Shanghai'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/

# Custom user authentication backend.
AUTHENTICATION_BACKENDS = (
    'users.views.CustomBackend',
)

STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')

REST_FRAMEWORK = {
    # User login authentication classes.
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'rest_framework.authentication.BasicAuthentication',
        'rest_framework.authentication.SessionAuthentication',
    ),
    'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
    # 'DEFAULT_FILTER_BACKENDS': ('django_filters.rest_framework.DjangoFilterBackend',),
    # 'PAGE_SIZE': 10,
}

# JWT settings
import datetime
JWT_AUTH = {
    'JWT_EXPIRATION_DELTA': datetime.timedelta(days=7),
    'JWT_AUTH_HEADER_PREFIX': 'JWT',
}

# Mobile phone number regular expression.
# NOTE(review): not a raw string; `\d` happens to survive today but this
# should be written as r"..." to avoid invalid-escape warnings.
REGEX_MOBILE = "^1[358]\d{9}$|^147\d{8}$|^176\d{8}$"

# Yunpian (SMS provider) settings
APIKEY = ""
|
[
"2252506855@qq.com"
] |
2252506855@qq.com
|
022fa9467f8e1bb59b76e87ce8ea9203150c0ea2
|
8999b3620b307a9d4d32385a4946552614031289
|
/wv-ner/utils/classifier.py
|
f87431e77f64369d3f8e44f3681b0d6f575a091b
|
[
"MIT"
] |
permissive
|
nishankmahore/Bio-NER
|
8169d7780cc1f29f389ab793e876ef3cb0ca6d86
|
cc36d2871e70d416300a5ac8ac498fbd87b50cd9
|
refs/heads/master
| 2021-06-13T23:17:30.216361
| 2017-03-26T12:39:40
| 2017-03-26T12:39:40
| 86,139,006
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,345
|
py
|
import numpy as np
import multiprocessing
from multiprocessing import Pool
import operator
import idf
import math
import generate_vector as gen_vec
import cPickle
UNCLASSIFIED_ID = -1


def classify(
        entities, wordlist_path, dictlist_path,
        vocabulary_path, categories, nbr_sents_path,
        sent_freq_path, context_window, context_weight,
        internal_weight, with_weighted_context, nbr_of_threads,
        classification_threshold, similarity_threshold):
    """Classify entity candidates into one of the predefined categories.

    entities       - list of entity candidates to classify
    *_path         - paths to pickled corpus artifacts (wordlist, dictlist,
                     sentence count, sentence frequencies)
    categories     - the category reference vectors
    nbr_of_threads - 1 = run in-process; 0 = one pool worker per CPU core;
                     otherwise the given number of pool workers
    Returns the combined list of classification tuples from all batches.
    """
    # Fixed: every pickle file is now opened in binary mode ("rb"); two of
    # them were previously opened in text mode, which corrupts pickled data
    # on platforms with newline translation.
    with open(wordlist_path, "rb") as f:
        wordlist = cPickle.load(f)
    with open(dictlist_path, "rb") as f:
        dictlist = cPickle.load(f)
    with open(nbr_sents_path, "rb") as f:
        number_of_sentences = cPickle.load(f)
    with open(sent_freq_path, "rb") as f:
        sentence_frequency = cPickle.load(f)
    if nbr_of_threads == 1:
        # Single-threaded path: classify everything in this process.
        classified_entities = _classify_entities_batch(
            entities, wordlist, dictlist,
            vocabulary_path, categories, number_of_sentences,
            sentence_frequency, context_window, context_weight,
            internal_weight, with_weighted_context,
            classification_threshold, similarity_threshold)
    else:
        if nbr_of_threads == 0:
            nbr_of_threads = multiprocessing.cpu_count()
        # Split the candidates evenly across the worker pool.
        npchunks = np.array_split(np.array(entities), nbr_of_threads)
        pool = Pool(nbr_of_threads)
        try:
            chunks = [[
                chunk, wordlist, dictlist,
                vocabulary_path, categories, number_of_sentences,
                sentence_frequency, context_window, context_weight,
                internal_weight, with_weighted_context,
                classification_threshold, similarity_threshold] for chunk in list(npchunks)]
            classified_entities = reduce(operator.add, pool.map(_classify_entities_batch_star, chunks))
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # still propagate; worker failures surface as ValueError as before.
            pool.terminate()
            pool.close()
            raise ValueError('Error occurred while classifying in thread.')
        pool.terminate()
        pool.close()
    return classified_entities
def _classify_entities_batch_star(p):
    """Adapter for Pool.map: unpack one packed argument list.

    Pool.map hands each worker a single object, so the thirteen
    positional arguments of _classify_entities_batch travel packed
    together in one list.
    """
    return _classify_entities_batch(*p[:13])
def _classify_entities_batch(
        nps, wordlist, dictlist,
        vocabulary_path, categories, number_of_sentences,
        sentence_frequency, context_window, context_weight,
        internal_weight, with_weighted_context,
        classification_threshold, similarity_threshold):
    """Classify one batch of noun phrases.

    For each phrase a context vector is built via generate_vector; when a
    category is assigned, the phrase's token span is located in `wordlist`
    (scanning forward from the previous match, so phrases are assumed to
    appear in document order — TODO confirm with callers).

    Returns a list of (category_id, start_index, end_index, sims) tuples.
    """
    classified_entities = []
    latest_word = 0
    wordlist_length = len(wordlist)
    for nphrase in nps:
        sig = gen_vec.generate(
            vocabulary_path, wordlist, dictlist, [nphrase],
            number_of_sentences, sentence_frequency, context_window,
            context_weight, internal_weight, with_weighted_context)
        # Skip phrases with no usable signature vector.
        if not (sig is None) and len(sig) != 0:
            (category_id, sims) = _classify_entity(sig, categories, classification_threshold, similarity_threshold)
            if category_id != UNCLASSIFIED_ID:
                np_terms = nphrase.split(" ")
                np_term_length = len(np_terms)
                # Resume the scan where the previous phrase ended
                # (Python 2: xrange avoids materialising the range).
                for i in xrange(latest_word, wordlist_length):
                    if wordlist[i:i + np_term_length] == np_terms:
                        classified_entities.append((category_id, i, i + np_term_length - 1, sims))
                        latest_word = i + np_term_length
                        break
    return classified_entities
def _classify_entity(entityV, categories, classification_threshold, similarity_threshold):
    """Assign an entity vector to the closest category.

    Returns (category_id, normalized_similarity_strings); the id is
    UNCLASSIFIED_ID when the vector is empty, degenerate, ambiguous
    (a runner-up too close to the best), or below the similarity
    threshold.
    """
    if len(entityV) == 0:
        return (UNCLASSIFIED_ID, ["?%", "?%", "?%"])
    sims = [abs(similarity(entityV, category_vector)) for category_vector in categories]
    if any(math.isnan(value) for value in sims) or all(value == 0.0 for value in sims):
        return (UNCLASSIFIED_ID, ["0%", "0%", "0%"])
    best = max(sims)
    best_id = sims.index(best)
    total = sum(sims)
    normalized = ["{0}%".format(int((value / total) * 100)) for value in sims]
    # Reject ambiguous cases: another category nearly as similar as the best.
    for idx, value in enumerate(sims):
        if idx != best_id and (value / float(best)) > classification_threshold:
            return (UNCLASSIFIED_ID, normalized)
    if best > similarity_threshold:
        return (best_id, normalized)
    return (UNCLASSIFIED_ID, normalized)
def similarity(entityV, categoryV):
    """Cosine similarity between an entity vector and a category vector.

    Returns 0.0 when the entity vector is missing or either vector has
    zero magnitude.
    """
    if entityV is None:
        return 0.0
    dot_product = np.dot(entityV, categoryV)
    magnitude = math.sqrt(entityV.dot(entityV)) * math.sqrt(categoryV.dot(categoryV))
    if magnitude == 0:
        return 0.0
    return dot_product / float(magnitude)
|
[
"noreply@github.com"
] |
nishankmahore.noreply@github.com
|
cdb4e7a702048cc9880d517fa50965096560ac74
|
9c26f77c96d5d3b23a7c7e3f266b50dc17783a87
|
/Regist/models.py
|
141100d6e1c0cca26073354baee1e01c3f2483ef
|
[] |
no_license
|
gysxl/sportTimeServer
|
eb2664cefe388571365cc2fd0fbd70d9dcd6aa97
|
d1010b9354d6881751d01814a4fae5ff0f06bd37
|
refs/heads/master
| 2020-04-05T12:08:35.355872
| 2017-07-03T10:52:51
| 2017-07-03T10:52:51
| 95,221,137
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,311
|
py
|
from __future__ import unicode_literals
from django.db import models
# Create your models here.
class UserInformation(models.Model):
    """Account record: credentials plus basic profile fields."""
    uid = models.CharField(max_length=200)  # external user identifier
    phone = models.CharField(max_length=200)
    # NOTE(review): stored as a plain CharField — presumably hashed
    # before save; verify against the registration view.
    password = models.CharField(max_length=200)
    user_name = models.CharField(max_length=200)
    roles = models.CharField(max_length=200)
    pub_date = models.DateTimeField('date published')
    # def __str__(self): # __unicode__ on Python 2
    # return self.password
class PushData(models.Model):
    """Push record keyed by a user's uid string."""
    push_id = models.IntegerField(default=0)
    # Plain string, not a ForeignKey — matches UserInformation.uid by value.
    uid = models.CharField(max_length=200)
    month = models.CharField(max_length=200, default="")
    date_info = models.CharField(max_length=200, default="")
class CouponData(models.Model):
    """Coupon balance attached to a user account."""
    # ForeignKey despite the *_id-style name; column in DB will be uid_id.
    uid = models.ForeignKey(UserInformation, on_delete=models.CASCADE)
    coupon_data = models.IntegerField(default=0)
class PresentInformation(models.Model):
    """Catalogue entry for a redeemable present and its stock count."""
    pres_id = models.IntegerField(default=0)
    pres_name = models.CharField(max_length=200)
    pres_number = models.IntegerField(default=0)
class ConvertData(models.Model):
    """Redemption event linking a user to the present they exchanged for."""
    convert_id = models.IntegerField(default=0)
    uid = models.ForeignKey(UserInformation, on_delete=models.CASCADE)
    pres_id = models.ForeignKey(PresentInformation, on_delete=models.CASCADE)
|
[
"shanxiaolan@didichuxing.com"
] |
shanxiaolan@didichuxing.com
|
32d349fdae719b7d00ff419d829cbf443f06200b
|
47a9218ef23194f36428452a8aa18d33528f6e44
|
/utility_class.py
|
c9747a07a6e0ed0b6643c44592e2b8ef8afe215e
|
[] |
no_license
|
AhmedEldib/Realtime-Patient-Heartrate-GUI
|
43595a51c1b37c919e7cfb702e164c993cbf9585
|
709da1f10b771ce0933f7fb2502f8bde43c549af
|
refs/heads/main
| 2023-02-19T12:39:05.050434
| 2021-01-24T10:23:08
| 2021-01-24T10:23:08
| 332,417,251
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 455
|
py
|
import numpy as np
class Plot_Data(object):
    """Mutable container for real-time plot state.

    Plot_Stop acts as a pause flag (1 = stopped, 0 = running);
    data_X/data_Y hold the series currently being drawn.
    """

    def __init__(self):
        # Begin in the "stopped" state with empty series.
        self.Plot_Stop = 1
        self.GraphIsWav = self.Gmin = self.Gmax = self.data_stop = 0
        self.data_X = []
        self.data_Y = []

    def Stop(self):
        """Pause plotting."""
        self.Plot_Stop = 1

    def Start(self):
        """Resume plotting."""
        self.Plot_Stop = 0

    def Empty(self):
        """Discard the accumulated series data."""
        self.data_X, self.data_Y = [], []
|
[
"noreply@github.com"
] |
AhmedEldib.noreply@github.com
|
8bcabbeb501546724c29278bc074a0352026e866
|
f2f5d06d6b6e51ad669232b38c131569158e2c55
|
/tests/models/__init__.py
|
7c39409b0f0bb3b1b237931383d888faade65dc5
|
[
"Apache-2.0"
] |
permissive
|
nickspring/architect
|
070131a96bd9e4cfddd468c41ec55e0b2cb1e2af
|
2e320249da6986e5fcc0060819d8b210bc6dd8a7
|
refs/heads/master
| 2021-01-16T20:06:32.387896
| 2015-01-29T07:34:10
| 2015-01-29T07:34:10
| 22,872,673
| 0
| 1
| null | 2015-01-30T18:19:29
| 2014-08-12T09:53:49
|
Python
|
UTF-8
|
Python
| false
| false
| 532
|
py
|
from architect.orms import BasePartitionableMixin
class AbstractPartitionableModel(BasePartitionableMixin):
    """Abstract partitionable model used for tests not connected to a specific ORM."""

    @property
    def model_meta(self):
        """Static metadata stub; only the database backend is meaningful."""
        meta = {
            'table': None,
            'pk': None,
            'database': 'postgresql',
            'column_value': None,
        }
        return meta

    def execute_raw_sql(self, sql):
        """Return a no-op callable instead of touching a real database."""
        def _noop():
            return None
        return _noop

    @classmethod
    def get_empty_instance(cls, dsn=None):
        """Instantiate the model without any connection setup."""
        return cls()
|
[
"tepkeev@gmail.com"
] |
tepkeev@gmail.com
|
e56f53592022d142eab866b93632c4aab530b95c
|
288902fa603d234a5785620ab45f9a180d7139b8
|
/Entity_extraction/entity_extraction/code/BiLSTM_model.py
|
0d0b12cc27b7df204e554c826b6db49356f85b43
|
[] |
no_license
|
ItsPavan/Entity_extraction
|
e3c1309fa73e1470f5459db59e949a4449414155
|
e83d021c0a71be98a4a32d3a02be56796f4915f1
|
refs/heads/master
| 2020-05-15T17:07:40.705092
| 2019-05-22T15:18:27
| 2019-05-22T15:18:27
| 182,400,625
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,204
|
py
|
import numpy as np
from validation import compute_f1
from keras.models import Model
from keras.layers import TimeDistributed,Conv1D,Dense,Embedding,Input,Dropout,LSTM,Bidirectional,MaxPooling1D,Flatten,concatenate
from data_preprocessing import readfile,createBatches,createMatrices,iterate_minibatches,addCharInformatioin,padding
from keras.utils import Progbar
from keras.preprocessing.sequence import pad_sequences
from keras.initializers import RandomUniform
# Number of full passes over the training batches.
epochs = 50
def tag_dataset(dataset):
    """Run the trained model over `dataset` sample by sample.

    Each dataset entry is a (tokens, casing, char, labels) tuple; relies
    on the module-level `model` built further down in this script.

    Returns:
        (predLabels, correctLabels): per-sentence predicted class index
        arrays and the corresponding gold labels.
    """
    correctLabels = []
    predLabels = []
    b = Progbar(len(dataset))
    for i,data in enumerate(dataset):
        tokens, casing,char, labels = data
        # Wrap each sample as a batch of one for model.predict.
        tokens = np.asarray([tokens])
        casing = np.asarray([casing])
        char = np.asarray([char])
        pred = model.predict([tokens, casing,char], verbose=False)[0]
        pred = pred.argmax(axis=-1) #Predict the classes
        correctLabels.append(labels)
        predLabels.append(pred)
        b.update(i)
    b.update(i+1)
    return predLabels, correctLabels
# --- Load and augment the datasets -----------------------------------------
trainSentences = readfile("A:\\zycus-test\\workspace-alewo4\\data\\train.csv")
# devSentences = readfile("data/valid.txt")
testSentences = readfile("A:\\zycus-test\\workspace-alewo4\\data\\test.csv")
trainSentences = addCharInformatioin(trainSentences)
# devSentences = addCharInformatioin(devSentences)
testSentences = addCharInformatioin(testSentences)
# Collect the label inventory and the lower-cased vocabulary in use.
labelSet = set()
words = {}
# for dataset in [trainSentences, devSentences, testSentences]:
for dataset in [trainSentences , testSentences]:
    for sentence in dataset:
        for token,char,label in sentence:
            labelSet.add(label)
            words[token.lower()] = True
# :: Create a mapping for the labels ::
label2Idx = {}
for label in labelSet:
    label2Idx[label] = len(label2Idx)
# :: Hard coded case lookup ::
case2Idx = {'numeric': 0, 'allLower':1, 'allUpper':2, 'initialUpper':3, 'other':4, 'mainly_numeric':5, 'contains_digit': 6, 'PADDING_TOKEN':7}
caseEmbeddings = np.identity(len(case2Idx), dtype='float32')
# :: Read in word embeddings ::
# Only vectors for words seen in the data are kept; indices 0 and 1 are
# reserved for the padding and unknown tokens.
word2Idx = {}
wordEmbeddings = []
fEmbeddings = open("A:\\zycus-test\\workspace-alewo4\\embeddings\\glove.6B\\glove.6B.100d.txt", encoding="utf-8")
for line in fEmbeddings:
    split = line.strip().split(" ")
    word = split[0]
    if len(word2Idx) == 0: #Add padding+unknown
        word2Idx["PADDING_TOKEN"] = len(word2Idx)
        vector = np.zeros(len(split)-1) #Zero vector vor 'PADDING' word
        wordEmbeddings.append(vector)
        word2Idx["UNKNOWN_TOKEN"] = len(word2Idx)
        vector = np.random.uniform(-0.25, 0.25, len(split)-1)
        wordEmbeddings.append(vector)
    if split[0].lower() in words:
        vector = np.array([float(num) for num in split[1:]])
        wordEmbeddings.append(vector)
        word2Idx[split[0]] = len(word2Idx)
wordEmbeddings = np.array(wordEmbeddings)
# Character inventory; 0/1 reserved for padding and unknown characters.
char2Idx = {"PADDING":0, "UNKNOWN":1}
for c in " 0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ.,-_()[]{}!?:;#'\"/\\%$`&=*+@^~|":
    char2Idx[c] = len(char2Idx)
# Convert tokens/labels to index matrices and pad to fixed lengths.
train_set = padding(createMatrices(trainSentences,word2Idx, label2Idx, case2Idx,char2Idx))
# dev_set = padding(createMatrices(devSentences,word2Idx, label2Idx, case2Idx,char2Idx))
test_set = padding(createMatrices(testSentences, word2Idx, label2Idx, case2Idx,char2Idx))
idx2Label = {v: k for k, v in label2Idx.items()}
# Persist the lookup tables for use at inference time.
np.save("A:\\zycus-test\\workspace-alewo4\\models\\idx2Label.npy",idx2Label)
np.save("A:\\zycus-test\\workspace-alewo4\\models\\word2Idx.npy",word2Idx)
train_batch,train_batch_len = createBatches(train_set)
# dev_batch,dev_batch_len = createBatches(dev_set)
test_batch,test_batch_len = createBatches(test_set)
# --- Model: word + casing embeddings and a char-CNN feeding a BiLSTM -------
words_input = Input(shape=(None,),dtype='int32',name='words_input')
words = Embedding(input_dim=wordEmbeddings.shape[0], output_dim=wordEmbeddings.shape[1], weights=[wordEmbeddings], trainable=False)(words_input)
casing_input = Input(shape=(None,), dtype='int32', name='casing_input')
casing = Embedding(output_dim=caseEmbeddings.shape[1], input_dim=caseEmbeddings.shape[0], weights=[caseEmbeddings], trainable=False)(casing_input)
character_input=Input(shape=(None,52,),name='char_input')
embed_char_out=TimeDistributed(Embedding(len(char2Idx),30,embeddings_initializer=RandomUniform(minval=-0.5, maxval=0.5)), name='char_embedding')(character_input)
dropout= Dropout(0.5)(embed_char_out)
conv1d_out= TimeDistributed(Conv1D(kernel_size=3, filters=30, padding='same',activation='tanh', strides=1))(dropout)
maxpool_out=TimeDistributed(MaxPooling1D(52))(conv1d_out)
char = TimeDistributed(Flatten())(maxpool_out)
char = Dropout(0.5)(char)
output = concatenate([words, casing,char])
output = Bidirectional(LSTM(200, return_sequences=True, dropout=0.50, recurrent_dropout=0.25))(output)
output = TimeDistributed(Dense(len(label2Idx), activation='softmax'))(output)
model = Model(inputs=[words_input, casing_input,character_input], outputs=[output])
model.compile(loss='sparse_categorical_crossentropy', optimizer='nadam')
model.summary()
# plot_model(model, to_file='model.png')
# --- Training loop ---------------------------------------------------------
for epoch in range(epochs):
    print("Epoch %d/%d"%(epoch,epochs))
    a = Progbar(len(train_batch_len))
    for i,batch in enumerate(iterate_minibatches(train_batch,train_batch_len)):
        labels, tokens, casing,char = batch
        model.train_on_batch([tokens, casing,char], labels)
        a.update(i)
    a.update(i+1)
    print(' ')
model.save("A:\\zycus-test\\workspace-alewo4\\models\\model.h5")
# Performance on dev dataset
# predLabels, correctLabels = tag_dataset(dev_batch)
# pre_dev, rec_dev, f1_dev = compute_f1(predLabels, correctLabels, idx2Label)
# print("Dev-Data: Prec: %.3f, Rec: %.3f, F1: %.3f" % (pre_dev, rec_dev, f1_dev))
# Performance on test dataset
predLabels, correctLabels = tag_dataset(test_batch)
pre_test, rec_test, f1_test= compute_f1(predLabels, correctLabels, idx2Label)
print("Test-Data: Prec: %.3f, Rec: %.3f, F1: %.3f" % (pre_test, rec_test, f1_test))
|
[
"noreply@github.com"
] |
ItsPavan.noreply@github.com
|
fa9fbb603c16db9e26e7e660c7ff3e4fa90630e1
|
543437ea84eb375dfa57df2aa8f4e311e9908d61
|
/kyu7/Exes_And_Ohs.py
|
ba2438c1d478ffcc9025c6851b2617f58a7958eb
|
[] |
no_license
|
GabrielCernei/codewars
|
cd1ef8d3875fc463cc8f7fdadeb7ba21a3ea6a1c
|
5795028a358423f1f3bcab2f2fa1292482945ee8
|
refs/heads/master
| 2022-02-21T20:10:29.071148
| 2019-09-29T14:42:02
| 2019-09-29T14:42:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 126
|
py
|
# https://www.codewars.com/kata/exes-and-ohs/train/python
def xo(s):
    """Return True when s holds equal counts of 'x' and 'o', case-insensitively."""
    lowered = s.lower()
    return lowered.count('x') == lowered.count('o')
|
[
"ldunbar@mac.com"
] |
ldunbar@mac.com
|
85b948d2703e272f744671e008a744a9bd8c6d65
|
af83f15a1ffb57b148fe641bde29359615797e9b
|
/data_visualization/line-charts.py
|
d3fc28bf6cb708abb29ea4f241385bc9035a2808
|
[] |
no_license
|
umeshkrishna19/data_science_python
|
150c3df74f56364ca423430070d2ac8b269bfe1d
|
c082f9c7fa5bb48b31e18cdc210cc3c3a6df5170
|
refs/heads/master
| 2020-03-27T06:14:36.954945
| 2018-08-25T12:55:50
| 2018-08-25T12:55:50
| 146,091,635
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 737
|
py
|
from matplotlib import pyplot as plt
# Bias-variance illustration: variance falls and squared bias rises as
# model complexity grows; total error is their element-wise sum.
variance = [1, 2, 4, 8, 16, 32, 64, 128, 256]
bias_squared = [256, 128, 64, 32, 16, 8, 4, 2, 1]
total_error = [x + y for x, y in zip(variance, bias_squared)]
xs = [i for i,_ in enumerate(variance)]
# we can make multiple calls to plt.plot
# to show multiple series on the same chart
plt.plot(xs, variance,
         'g-', label='variance')
# green solid line
plt.plot(xs, bias_squared, 'r-.', label='bias^2')
# red dot-dashed line
plt.plot(xs, total_error, 'b:', label='total error') # blue dotted line
# because we've assigned labels to each series
# we can get a legend for free
# loc=9 means "top center"
plt.legend(loc=9)
plt.xlabel("model complexity")
plt.title("The Bias-Variance Tradeoff")
plt.show()
|
[
"umeshkrishna33@gmail.com"
] |
umeshkrishna33@gmail.com
|
7fc26b188d20bf6c8ef668492cbff6978181da8f
|
c653ba46c93dfb1a71502ff7faaa950d0477dc70
|
/backend/home/migrations/0002_load_initial_data.py
|
897e315a325ba64f2383ac2e9dd9456ac0b72605
|
[] |
no_license
|
crowdbotics-apps/auto-union-25946
|
ce8452683dd7a9a1cdacbb572da2f15c6f813f64
|
c00b77ad38870ad2e94910d279881ca02e873302
|
refs/heads/master
| 2023-04-06T11:23:45.366243
| 2021-04-26T14:37:43
| 2021-04-26T14:37:43
| 361,783,455
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,294
|
py
|
from django.db import migrations
def create_customtext(apps, schema_editor):
    """Seed the CustomText table with the application title."""
    custom_text_model = apps.get_model("home", "CustomText")
    title = "Auto Union"
    custom_text_model.objects.create(title=title)
def create_homepage(apps, schema_editor):
    """Seed the HomePage table with the default landing-page markup.

    NOTE(review): the triple-quoted literal keeps its leading
    indentation inside the stored HTML — presumably harmless when
    rendered; verify against the template.
    """
    HomePage = apps.get_model("home", "HomePage")
    homepage_body = """
    <h1 class="display-4 text-center">Auto Union</h1>
    <p class="lead">
    This is the sample application created and deployed from the Crowdbotics app.
    You can view list of packages selected for this application below.
    </p>"""
    HomePage.objects.create(body=homepage_body)
def create_site(apps, schema_editor):
    """Point the default django.contrib.sites record (pk=1) at the app domain."""
    site_model = apps.get_model("sites", "Site")
    custom_domain = "auto-union-25946.botics.co"
    site_params = {"name": "Auto Union"}
    # Only attach a domain when one is configured (kept for template parity).
    if custom_domain:
        site_params["domain"] = custom_domain
    site_model.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
    """Data migration seeding CustomText, HomePage and the Site record."""
    dependencies = [
        ("home", "0001_initial"),
        ("sites", "0002_alter_domain_unique"),
    ]
    # Forward-only seeds: no reverse functions are supplied, so this
    # migration cannot be unapplied.
    operations = [
        migrations.RunPython(create_customtext),
        migrations.RunPython(create_homepage),
        migrations.RunPython(create_site),
    ]
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
a1d603e38160d60eb5f9195479294dfaa110719b
|
a43a63616e68719338946fe8f187c83016e99abb
|
/data_center/check_global_metop.py
|
d78bcb0ce4121048f88dd80c81ea79f0d3606597
|
[] |
no_license
|
isabella232/pytroll
|
172c41313109bb2d735d2acc3f700862d994cdf3
|
871438e3a32ec147b7605540fa2788e073ff339b
|
refs/heads/master
| 2023-03-31T18:44:09.048669
| 2020-12-11T10:49:38
| 2020-12-11T10:49:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,884
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2010-2011.
# Author(s):
# Lars Ø. Rasmussen <ras@dmi.dk>
# Martin Raspaud <martin.raspaud@smhi.se>
# This file is part of pytroll.
# Pytroll is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
# Pytroll is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along with
# pytroll. If not, see <http://www.gnu.org/licenses/>.
"""A simple producer.
"""
import time
from datetime import datetime, timedelta
import glob
import os
from posttroll.message import Message
from dc.connections import DCConnectionsPush
# Directory and filename pattern of incoming Metop AVHRR level-1b files.
PATH = "/data/prod/satellit/metop"
PATTERN = "AVHR_xxx_1B_M02_*"
# High-water mark: only files modified after this moment are reported;
# start the polling window one hour in the past.
stamp = datetime.utcnow() - timedelta(hours=1)
def get_file_list(timestamp):
    """Get files.

    Returns (path, mtime-as-datetime) pairs for regular files under PATH
    matching PATTERN whose modification time is strictly newer than
    `timestamp`, sorted oldest first.
    """
    flist = glob.glob(os.path.join(PATH, PATTERN))
    result = []
    for fil in flist:
        if not os.path.isfile(fil):
            continue
        mtime = os.stat(fil).st_mtime
        dt_ = datetime.utcfromtimestamp(mtime)
        if timestamp < dt_:
            result.append((fil, dt_))
    # Python 2 only: `sorted` accepts a cmp function as its second
    # positional argument.
    return sorted(result, lambda x, y: cmp(x[1], y[1]))
def younger_than_stamp_files():
    """Uses glob polling to get new files.

    Yields paths newer than the module-level `stamp`, advancing the
    stamp to each yielded file's mtime so repeated calls only report
    files not seen before.
    """
    global stamp
    for fil, tim in get_file_list(stamp):
        yield os.path.join(PATH, fil)
        stamp = tim
def send_new_files():
    """Create messages and send away.

    Generator: for every new file, builds a metadata dict (the scanline
    start/end times are parsed from fixed character offsets of the EPS
    filename) and yields it wrapped in a posttroll Message.
    """
    for fil in younger_than_stamp_files():
        base = os.path.basename(fil)
        metadata = {
            "filename": base,
            "URIs": ["file://"+fil],
            "type": "HRPT 1b",
            "format": "EPS 1b",
            # Characters 16-29 / 32-45 of the basename encode the
            # first/last scanline timestamps (YYYYmmddHHMMSS).
            "time_of_first_scanline": datetime.strptime(base[16:30],
                                                        "%Y%m%d%H%M%S").isoformat(),
            "time_of_last_scanline": datetime.strptime(base[32:46],
                                                       "%Y%m%d%H%M%S").isoformat()}
        import pprint
        pprint.pprint(metadata)
        yield Message('/dc/polar/gds', 'update', metadata)
if __name__ == '__main__':
    CONNECTIONS = DCConnectionsPush().start()
    # wait to get a connection
    time.sleep(3)
    # Poll for new files once a minute until interrupted (Python 2
    # print statement below).
    while True:
        try:
            #send_new_files()
            for i in send_new_files():
                CONNECTIONS.send(i)
            time.sleep(60)
        except (KeyboardInterrupt, SystemExit):
            print "quitting ..."
            CONNECTIONS.stop()
            break
|
[
"martin.raspaud@smhi.se"
] |
martin.raspaud@smhi.se
|
cbd7ba5fae05ffd93fa6c536b0a752122d028cc3
|
9dfe2e76f5c40ef9e510e060fba03eb864f48bb7
|
/applications/home/forms.py
|
a1bbe44b7f3b6eef346c31b9e20fbffe4e75d11c
|
[] |
no_license
|
Rogrback/myblogs
|
e682c82b35337dd6db715e662512f7f842d6ab7f
|
4480ab02a4a0a9a27314478d2f6aa9cfad736d66
|
refs/heads/master
| 2023-04-08T15:50:51.243912
| 2021-04-17T10:24:14
| 2021-04-17T10:24:14
| 358,731,215
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 492
|
py
|
from django import forms
from .models import Subscribers, Contact
class SubscribersForm(forms.ModelForm):
    """Newsletter signup form exposing only the subscriber's email."""
    class Meta:
        model = Subscribers
        fields = (
            'email',
        )
        # Spanish placeholder text shown in the rendered input.
        widgets = {
            'email': forms.EmailInput(
                attrs={
                    'placeholder': 'Correo Electrónico',
                }
            ),
        }
class ContactForm(forms.ModelForm):
    """Contact form exposing every field of the Contact model."""
    class Meta:
        model = Contact
        # ('__all__') is just the string '__all__'; the parentheses are
        # redundant, not a tuple.
        fields = ('__all__')
|
[
"Rogrback"
] |
Rogrback
|
790242253551921871eed0901841dd8404a53daa
|
ef24c6be7a4dbff90c328252b1150f8496a8e24c
|
/main.py
|
70e417d064d13dc9d488767d624c46dced2ef040
|
[] |
no_license
|
junruhong/Hogwarts
|
097eed8d86f3c9ff5aaf932b4a649a9f03f4b0b1
|
c4aeb41b1825e36fa0391f45aebf983ae0e605d7
|
refs/heads/main
| 2023-04-01T02:33:18.289727
| 2021-04-09T09:36:48
| 2021-04-09T09:36:48
| 352,335,957
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 49
|
py
|
# BUG FIX: the entry-point sentinel must be the string "__main__"; the
# original compared against the undefined name __main__, raising
# NameError whenever the script ran.
if __name__ == "__main__":
    print("Hello World!")
|
[
"noreply@github.com"
] |
junruhong.noreply@github.com
|
cd21b43044e2ba9e947604eea4e7b2d8bd78d434
|
3d825b7cab1eccade255c076ec5d4d62ca94bdb6
|
/0x10-python-network_0/6-peak.py
|
5e9aa8aa788610632261adefb81b34da49b3e570
|
[] |
no_license
|
banuaksom/holbertonschool-higher_level_programming
|
5805710a0154943f56d47a615a27ab22a8f10577
|
3dcd23f1ab093d379f1230b0a3930bd63de28d9d
|
refs/heads/master
| 2020-07-23T01:45:39.557838
| 2020-02-14T05:48:17
| 2020-02-14T05:48:17
| 207,405,308
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,213
|
py
|
#!/usr/bin/python3
def increasing(list_of_integers):
    """Return True when the list never decreases from one element to the next."""
    return all(
        earlier <= later
        for earlier, later in zip(list_of_integers, list_of_integers[1:])
    )
def decreasing(list_of_integers):
    """Return True when the list never increases from one element to the next."""
    return all(
        earlier >= later
        for earlier, later in zip(list_of_integers, list_of_integers[1:])
    )
def find_peak(list_of_integers):
    """Find a peak in a list of unsorted integers.

    A never-decreasing list's peak is its last element and a
    never-increasing list's peak is its first; otherwise the first
    interior element at least as large as both neighbours and strictly
    larger than one of them is returned.  Returns None for an empty list.
    """
    if not list_of_integers:
        return None
    adjacent = list(zip(list_of_integers, list_of_integers[1:]))
    if all(a <= b for a, b in adjacent):
        # Monotone non-decreasing: the last element is a peak.
        return list_of_integers[-1]
    if all(a >= b for a, b in adjacent):
        # Monotone non-increasing: the first element is a peak.
        return list_of_integers[0]
    for idx in range(1, len(list_of_integers) - 1):
        prev_val = list_of_integers[idx - 1]
        cur_val = list_of_integers[idx]
        next_val = list_of_integers[idx + 1]
        if (cur_val >= prev_val and cur_val > next_val) or \
                (cur_val > prev_val and cur_val >= next_val):
            return cur_val
    return None
|
[
"banuaksom@gmail.com"
] |
banuaksom@gmail.com
|
9caa87c714caeb14f804b8f01a0bdfe5bd8da9f0
|
42dd44434fd104da927779f7e81992623b4b8d5b
|
/src/gems/urls.py
|
46608ceba4fd51ad061c0f64101fbf89ac730a27
|
[] |
no_license
|
sanya9652/gems
|
881185aa9b6bf34733c3af7b7b57088e83305a6e
|
07c304157d7b5dc46a4a09e022d7c5c95b26c4ef
|
refs/heads/master
| 2023-05-02T21:21:30.831049
| 2021-05-20T03:03:50
| 2021-05-20T03:03:50
| 369,046,634
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 859
|
py
|
"""gems URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, re_path
from gems_app.views import FileUploadView
# URL table: the Django admin plus the file-upload endpoint.
urlpatterns = [
    path('admin/', admin.site.urls),
    # <path:filename> matches the remainder of the URL (slashes included)
    # and is passed to the view as the `filename` argument.
    path('upload/<path:filename>', FileUploadView.as_view()),
]
|
[
"sanya9652@gmail.com"
] |
sanya9652@gmail.com
|
149290fb61f26b9cd5b11e1a83017d51d7b18665
|
2b565c386993f08256f3375bc05e188728c78d3b
|
/NewsSection/views/delete_news.py
|
5eea6db62ab338d72cb10930d71194abce08a6ea
|
[] |
no_license
|
ivvlko/AI_Portal
|
25f26400cf3b353ef362cbe77c5698c349cb4f51
|
4d0f96922a75e7982b1d1e44f6addec4b30f103c
|
refs/heads/main
| 2023-02-14T19:15:01.885012
| 2020-12-30T13:12:47
| 2020-12-30T13:12:47
| 316,210,781
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 358
|
py
|
from django.shortcuts import render, redirect
from NewsSection.models import News
def delete_view(request, pk):
    """Show a confirmation page on GET; delete the item and redirect otherwise."""
    current = News.objects.get(pk=pk)
    if request.method != 'GET':
        current.delete()
        return redirect('all_news_url')
    return render(request, 'news/delete.html', {'current': current})
|
[
"ivankoevbg@gmail.com"
] |
ivankoevbg@gmail.com
|
3070a319b22776752ac9bf28563a25fbf8456049
|
7693437bacd10d47f406857ac63de2928a36a561
|
/py/slope/dependency.py
|
2c8555f87eb5e49da1f3c942f68ca02a8389f681
|
[] |
no_license
|
oholter/matcher-with-word-embedings
|
d555f8f43f9bd76fedc39925fa93c2af24eaf967
|
0fe584496c61c8b9c01188c5e08c1a927b7b971d
|
refs/heads/master
| 2021-09-10T15:58:56.215353
| 2019-06-12T10:40:57
| 2019-06-12T10:40:57
| 160,821,473
| 1
| 0
| null | 2021-09-01T18:39:47
| 2018-12-07T12:33:34
|
Java
|
UTF-8
|
Python
| false
| false
| 1,985
|
py
|
import numpy as np
import scipy.stats as stats
# Independent variable: equally spaced levels from 1 down to 0.
x = np.array([1, 0.8, 0.6, 0.4, 0.2, 0])
# EKAW-EKAW
#y = np.array([0.75, 0.65, 0.48, 0.31, 0.12, 0.01]) #struc best
#y = np.array([1.0, 0.97, 0.84, 0.62, 0.34, 0.03]) #struc disambiguate
#y = np.array([0.81, 0.75, 0.61, 0.42, 0.29, 0.13]) #synonyms best
#y = np.array([0.97, 0.91, 0.85, 0.68, 0.62, 0.34]) #synonyms disambiguate
#y = np.array([0.92, 0.82, 0.72, 0.52, 0.29, 0.15]) #synonyms all relations
#y = np.array([0.90, 0.90, 0.77, 0.49, 0.23, 0.01]) #synonyms translation m
#y = np.array([0.63, 0.60, 0.44, 0.27, 0.14, 0.0]) #rdf2vec best
#y = np.array([0.48, 0.47, 0.52, 0.56, 0.56, 0.29]) #two documents
#y = np.array([0.87, 0.79, 0.66, 0.41, 0.21, 0.01]) # secondorder
#y = np.array([0.60, 0.69, 0.68, 0.65, 0.48, 0.04]) # subclass
# EKAW-CMT
#y = np.array([0.60, 0.48, 0.30, 0.23, 0.10, 0.00]) #struc best
#y = np.array([0.85, 0.70, 0.53, 0.43, 0.33, 0.08]) #struc disambiguate
#y = np.array([0.45, 0.33, 0.20, 0.23, 0.05, 0.05]) #synonyms best
#y = np.array([0.68, 0.65, 0.45, 0.23, 0.15, 0.05]) #synonyms disambiguate
#y = np.array([0.40, 0.30, 0.33, 0.18, 0.18, 0.03]) #synonyms all relations
#y = np.array([1.00, 0.75, 0.63, 0.38, 0.38, 0.13]) #synonyms translation m
#y = np.array([0.13, 0.0, 0.0, 0.0, 0.0, 0.0]) #rdf2vec best
#y = np.array([0.75, 0.58, 0.70, 0.70, 0.63, 0.20]) #two documents
#y = np.array([0.98, 0.78, 0.55, 0.50, 0.23, 0.03]) # secondorder
#y = np.array([0.95, 0.80, 0.75, 0.53, 0.40, 0.23]) # subclass
# ANATOMY
y = np.array([0.78, 0.60, 0.47, 0.34, 0.21, 0.00]) #synonyms best
#y = np.array([0.69, 0.62, 0.56, 0.45, 0.33, 0.14]) #synonyms best
# Ordinary least-squares fit y = a + b*x via the closed-form normal
# equations (b is the slope, a the intercept).
sx = x.sum()
sy = y.sum()
n = len(x)
xy = x*y
xx = x*x
yy = y*y
sxy = xy.sum()
sxx = xx.sum()
syy = yy.sum()
#print(yy)
b = ((n * sxy) - (sx*sy)) / ((n * sxx) - (sx * sx))
a = ((sy * sxx) - (sx * sxy)) / ((n * sxx) - (sx * sx))
print("f(x) = {} + {}x".format(a,b))
corr, _ = stats.pearsonr(x,y)
print("pearson's correlation: {}".format(corr))
|
[
"oholter@gmail.com"
] |
oholter@gmail.com
|
b16686bfedad4f9cdcb9048e0edaec1658ba20f5
|
fa54d74e8627fac1566f90b827c73ad5c4bc08ca
|
/0x07-python-test_driven_development/100-matrix_mul.py
|
73fbf51abf6cebfd09a9fd89a0ae57ae05738fc4
|
[] |
no_license
|
AdrianaAriza/holbertonschool-higher_level_programming
|
87b70810fc52c7c6036b92163f2b8d8e5a8568ae
|
ec8d1b03907c51905e7d611e71c8ff37e3892764
|
refs/heads/master
| 2020-09-29T08:02:58.249872
| 2020-05-17T04:51:56
| 2020-05-17T04:51:56
| 226,993,784
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,511
|
py
|
#!/usr/bin/python3
def matrix_mul(m_a, m_b):
    """Multiply two matrices.

    Args:
        m_a, m_b: rectangular lists of lists of ints/floats.

    Returns:
        The matrix product as a new list of lists.

    Raises:
        TypeError: if an argument is not a list of lists of numbers, or
            rows within a matrix differ in length.
        ValueError: if a matrix is empty or the shapes are incompatible.
    """
    if not isinstance(m_a, list):
        raise TypeError("m_a must be a list")
    if not isinstance(m_b, list):
        raise TypeError("m_b must be a list")
    # BUG FIX: the original used `all(not isinstance(i, list) ...)`,
    # which only fired when *every* row was a non-list and fired
    # vacuously on `[]` (masking the ValueError below); one bad row must
    # be enough to reject the matrix.
    if not all(isinstance(row, list) for row in m_a):
        raise TypeError("m_a must be a list of lists")
    if not all(isinstance(row, list) for row in m_b):
        raise TypeError("m_b must be a list of lists")
    if m_a == [] or m_a == [[]]:
        raise ValueError("m_a can't be empty")
    if m_b == [] or m_b == [[]]:
        raise ValueError("m_b can't be empty")
    for row in m_a:
        if not all(isinstance(value, (int, float)) for value in row):
            raise TypeError("m_a should contain only integers or floats")
    for row in m_b:
        if not all(isinstance(value, (int, float)) for value in row):
            raise TypeError("m_b should contain only integers or floats")
    if not all(len(row) == len(m_a[0]) for row in m_a):
        raise TypeError("each row of m_a must be of the same size")
    if not all(len(row) == len(m_b[0]) for row in m_b):
        raise TypeError("each row of m_b must be of the same size")
    if len(m_a[0]) != len(m_b):
        raise ValueError("m_a and m_b can't be multiplied")
    # Classic triple loop: result[i][k] = sum_j m_a[i][j] * m_b[j][k].
    result = []
    for i in range(len(m_a)):
        new_row = []
        for k in range(len(m_b[0])):
            acc = 0
            for j in range(len(m_b)):
                acc += m_a[i][j] * m_b[j][k]
            new_row.append(acc)
        result.append(new_row)
    return result
|
[
"1296@holbertonschool.com"
] |
1296@holbertonschool.com
|
305bea4821d1fb35977cb803fd7bebff35361dee
|
130095e322c49e513318e181f0c6a8cd65c924ef
|
/python-mid/zmienne_i_kod/8.enumerate_zip.py
|
a22296143ceb20d5308fba290f6a3cba48142b20
|
[] |
no_license
|
irekpi/lekcje
|
90c8e1eeb844ae4dc90a1fa8d59c04b7596c42a6
|
56f9c831e413610ead8d793ac2f296df749e19d1
|
refs/heads/master
| 2022-12-14T15:00:14.684436
| 2020-09-22T07:51:04
| 2020-09-22T07:51:04
| 229,556,749
| 0
| 1
| null | 2021-06-02T02:17:49
| 2019-12-22T11:22:03
|
HTML
|
UTF-8
|
Python
| false
| false
| 338
|
py
|
# Three parallel lists; zip stitches them into per-project tuples.
projects = ['Brexit', 'Nord Stream', 'US Mexico Border']
leaders = ['Theresa May', 'Wladimir Putin', 'Dolan Trump and Bill Clinton']
dates = ['2016-06-23', '2016-08-29', '1994-01-01']
combo = zip(projects, leaders, dates)
# Unpack each tuple directly in the for statement.
for project, leader, date in combo:
    print('{} project name, created by {} on {}'.format(project, leader, date))
|
[
"pilat.irek@gmail.com"
] |
pilat.irek@gmail.com
|
2bec75e97a11ef1e07664dedebdf67839fff55af
|
1c64a7091770b432bc2caefea2d34933369dafda
|
/suduku.py
|
fd9bfa92c1fd4cb21664a2d01d7adf12d47d67e2
|
[] |
no_license
|
praneeth-gunti/Sudoku
|
92af72fde73b991ca610c44400ba2f2ad688406b
|
70037bf125c2382a180a94c2a1432e1f43a4a446
|
refs/heads/master
| 2021-05-17T02:40:06.117308
| 2020-03-27T16:11:39
| 2020-03-27T16:11:39
| 250,580,343
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,905
|
py
|
# 9x9 Sudoku puzzle; 0 marks an empty cell to be filled by the solver.
grid = [
    [7, 8, 0, 4, 0, 0, 1, 2, 0],
    [6, 0, 0, 0, 7, 5, 0, 0, 9],
    [0, 0, 0, 6, 0, 1, 0, 7, 8],
    [0, 0, 7, 0, 4, 0, 2, 6, 0],
    [0, 0, 1, 0, 5, 0, 9, 3, 0],
    [9, 0, 4, 0, 6, 0, 0, 0, 5],
    [0, 7, 0, 3, 0, 0, 0, 1, 2],
    [1, 2, 0, 0, 0, 7, 4, 0, 0],
    [0, 4, 9, 2, 0, 6, 0, 0, 7]
]
def valid(b, val, pos):
    """Return True if placing `val` at `pos` (row, col) breaks no Sudoku rule."""
    row, col = pos
    # Row: the value must not appear elsewhere in the same row.
    for c in range(len(b[0])):
        if c != col and b[row][c] == val:
            return False
    # Column: the value must not appear elsewhere in the same column.
    for r in range(len(b)):
        if r != row and b[r][col] == val:
            return False
    # 3x3 box: the value must not appear elsewhere in the same box.
    box_row = row // 3
    box_col = col // 3
    for r in range(box_row * 3, box_row * 3 + 3):
        for c in range(box_col * 3, box_col * 3 + 3):
            if (r, c) != pos and b[r][c] == val:
                return False
    return True
def print_board(b):
    """Pretty-print the board with separators around every 3x3 box."""
    for i in range(len(b)):
        if i % 3 == 0:
            print("----------------------------------------")
        for j in range(len(b[0])):
            if j % 3 == 0:
                print(" | ", end=" ")
            # Last column closes the row with a trailing bar and newline.
            if j < 8:
                print(str(b[i][j]) + "  ", end=" ")
            else:
                print(str(b[i][j]) + "  | ")
    print("----------------------------------------")
def empty_space(b):
    """Locate the first empty cell (value 0), scanning row-major.

    Returns its (row, col) coordinates, or False when the board is full.
    """
    width = len(b[0])
    for row_idx in range(len(b)):
        for col_idx in range(width):
            if b[row_idx][col_idx] == 0:
                return row_idx, col_idx
    return False
def solve(b):
    """Backtracking Sudoku solver: fills `b` in place, True on success."""
    slot = empty_space(b)
    if not slot:
        # No empty cell left: the board is complete.
        return True
    row, col = slot
    for candidate in range(1, 10):
        if not valid(b, candidate, (row, col)):
            continue
        b[row][col] = candidate
        if solve(b):
            return True
        # Dead end: undo the placement and try the next candidate.
        b[row][col] = 0
    return False
# Demo: show the puzzle, solve it in place, then show the solution.
print_board(grid)
print("-------------SOLUTION-------------------")
solve(grid)
print_board(grid)
|
[
"noreply@github.com"
] |
praneeth-gunti.noreply@github.com
|
ed9dae9ae49b4287a6a6ddc79ee23f42c03b011d
|
817c9646fe8c36c858779aa52eade3e1676ccd97
|
/conkit/io/rosetta_npz.py
|
55b3a0c5af503354f28e921a6b316213caaf92a7
|
[
"BSD-3-Clause"
] |
permissive
|
rigdenlab/conkit
|
6ce790a5a7b7f70eabb584b439b275db1f00458d
|
926f194a660d95350e9172d236c9c002e8a921a3
|
refs/heads/master
| 2022-09-20T23:47:59.414757
| 2022-07-22T09:21:14
| 2022-07-22T09:21:14
| 30,641,512
| 19
| 13
|
BSD-3-Clause
| 2022-07-22T09:21:15
| 2015-02-11T09:58:20
|
Python
|
UTF-8
|
Python
| false
| false
| 3,789
|
py
|
# BSD 3-Clause License
#
# Copyright (c) 2016-21, University of Liverpool
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Parser module specific to rosetta NPZ distance predictions
"""
import numpy as np
from conkit.io._parser import BinaryDistanceFileParser
from conkit.core.distance import Distance
from conkit.core.distogram import Distogram
from conkit.core.distancefile import DistanceFile
DISTANCE_BINS = ((0, 2), (2, 2.5), (2.5, 3), (3, 4), (4, 4.5), (4.5, 5), (5, 5.5), (5.5, 6), (6, 6.5), (6.5, 7),
(7, 7.5), (7.5, 8), (8, 8.5), (8.5, 9), (9, 9.5), (9.5, 10), (10, 10.5), (10.5, 11), (11, 11.5),
(11.5, 12), (12, 12.5), (12.5, 13), (13, 13.5), (13.5, 14), (14, 14.5), (14.5, 15), (15, 15.5),
(15.5, 16), (16, 16.5), (16.5, 17), (17, 17.5), (17.5, 18), (18, 18.5), (18.5, 19), (19, 19.5),
(19.5, 20), (20, np.inf))
class RosettaNpzParser(BinaryDistanceFileParser):
"""Parser class for rosetta NPZ distance prediction file"""
def read(self, f_handle, f_id="rosettanpz"):
"""Read a distance prediction file
Parameters
----------
f_handle
Open file handle [read permissions]
f_id : str, optional
Unique contact file identifier
Returns
-------
:obj:`~conkit.core.distancefile.DistanceFile`
"""
hierarchy = DistanceFile(f_id)
hierarchy.original_file_format = "rosettanpz"
_map = Distogram("distogram_1")
hierarchy.add(_map)
prediction = np.load(f_handle, allow_pickle=True)
probs = prediction['dist']
# Bin #0 corresponds with d>20A & bins #1 ~ #36 correspond with 2A<d<20A in increments of 0.5A
probs = probs[:, :, [x for x in range(1, 37)] + [0]]
L = probs.shape[0]
for i in range(L):
for j in range(i, L):
_distance = Distance(i + 1, j + 1, tuple(probs[i, j, :].tolist()), DISTANCE_BINS)
_map.add(_distance)
return hierarchy
def write(self, f_handle, hierarchy):
"""Write a distance file instance to a file
Raises
------
:exc:`NotImplementedError`
Write function not available
"""
raise NotImplementedError("Write function not available yet")
|
[
"filo_san_rod@hotmail.com"
] |
filo_san_rod@hotmail.com
|
89772ac936538b983c17a068d7bd36224ddc2dd3
|
16735e2254170b5e27a5f91ae05f56eb3f31b5da
|
/GibbsRunner.py
|
2705311e475f5f098ed7d607151ebef97bc28d67
|
[] |
no_license
|
DogaCUlupinar/GibbsSampling
|
8f1518029ead6d7eff619cc62ac8a6928d2f19b4
|
3c79ba13bf291f4d89ab491a3c3c3e17383d98e0
|
refs/heads/master
| 2021-01-10T18:06:04.850050
| 2016-03-07T01:39:42
| 2016-03-07T01:39:42
| 53,286,987
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,998
|
py
|
from sampling.gibbsampler import *
from collections import defaultdict,Counter
from operator import itemgetter
import random
import numpy as np
import logging
import math
import matplotlib.pyplot as plt
import logging
CONFIG_LABEL = "LABEL"
CONFIG_PRIOR = "PRIOR"
CONFIG_GREEDY = "GREEDY"
CONFIG_COLOR = "COLOR"
configurations = [{CONFIG_LABEL:'Naive Gibbs', CONFIG_GREEDY:False,CONFIG_PRIOR:False, CONFIG_COLOR:'b' },\
{CONFIG_LABEL:'Prior Knowledge', CONFIG_GREEDY:False,CONFIG_PRIOR:True,CONFIG_COLOR:'r' },\
{CONFIG_LABEL:'Psuedo Greedy', CONFIG_GREEDY:True,CONFIG_PRIOR:False,CONFIG_COLOR:'g'},\
{CONFIG_LABEL:'Prior Knowledge and Greedy', CONFIG_GREEDY:True,CONFIG_PRIOR:True, CONFIG_COLOR:'k' }]
def makeMotifScoreCountGraph(seqs,kmer_len,ax,configurations,iterations):
found_kmer = defaultdict(list)
count= Counter()
sequence_kmers = selectInitalKmers(seqs, kmer_len)
for i in range(iterations):
if i % max(1,(iterations/10)) == 0: print "{0:.0f}% is done".format(float(i)/iterations*100)
consensus_string,score,data,profile = gibbs(sequence_kmers,determinstic=configurations[CONFIG_GREEDY] )
prior_profile = profile if configurations[CONFIG_PRIOR] else None
sequence_kmers = selectInitalKmers(seqs, kmer_len,prior = prior_profile)
count[score]+=1
found_kmer[score].append(consensus_string)
lables,values = zip(*sorted(count.items(),key=itemgetter(0)))
ax.plot(lables,values,'-o',label=configurations['LABEL'])
ks = min(found_kmer.keys())
return ks,found_kmer[ks]
def makeNumIterations(seqs,kmer_len,ax,configurations,passes,original_kmer):
max_iterations = 1000
found_kmer = defaultdict(list)
count= Counter()
sequence_kmers = selectInitalKmers(seqs, kmer_len)
total_iterations = []
for pass_c in range(passes):
if pass_c % max(1,(passes/10)) == 0: logging.critical("{0:.0f}% is done".format(float(pass_c)/passes*100))
for i in range(max_iterations):
consensus_string,score,data,profile = gibbs(sequence_kmers,determinstic=configurations[CONFIG_GREEDY] )
prior_profile = profile if configurations[CONFIG_PRIOR] else None
sequence_kmers = selectInitalKmers(seqs, kmer_len,prior = prior_profile)
count[score]+=1
found_kmer[score].append(consensus_string)
if consensus_string == original_kmer:
break
total_iterations.append(i)
lables,values = zip(*sorted(count.items(),key=itemgetter(0)))
ax.plot(lables,values,'-o',label=configurations['LABEL'])
print configurations[CONFIG_LABEL]
print "Average Iteration till motif: ",sum(total_iterations)/float(passes),np.std(total_iterations)
ks = min(found_kmer.keys())
return ks,found_kmer[ks]
def makeSTDevGraph(seqs,kmer_len,ax,configurations,iterations):
found_kmer = defaultdict(list)
count= Counter()
lines = []
iter_to_conv = [] #iterations required to converge
sequence_kmers = selectInitalKmers(seqs, kmer_len)
for i in range(iterations):
if i % max(1,(iterations/10)) == 0: print "{0:.0f}% is done".format(float(i)/iterations*100)
consensus_string,score,data,profile = gibbs(sequence_kmers,determinstic=configurations[CONFIG_GREEDY] )
prior_profile = profile if configurations[CONFIG_PRIOR] else None
sequence_kmers = selectInitalKmers(seqs, kmer_len,prior = prior_profile)
count[score]+=1
found_kmer[score].append(consensus_string)
lines.append(data)
iter_to_conv.append(data[0][-1])
"""
for line in lines:
ax.plot(*line,color='b')
"""
max_iteration = reduce(lambda x,y : max(x,len(y[0])) if type(x) == int else max(len(x[0]),len(y[0])), lines)
mean_curve = np.zeros(max_iteration)
stdev_curve = np.zeros(max_iteration)
#calculate mean and stdev
for i in range(max_iteration):
col = []
for line in lines:
try:
col.append(line[1][i])
except IndexError:
pass
stdev_curve[i] = np.std(col)
mean_curve[i] = sum(col)/float(len(col))
ax.plot(mean_curve,'-o',label=configurations[CONFIG_LABEL],color=configurations[CONFIG_COLOR])
ax.errorbar(np.arange(max_iteration),mean_curve,yerr=stdev_curve,color=configurations[CONFIG_COLOR], linestyle="None")
ks = min(found_kmer.keys())
print configurations[CONFIG_LABEL]
print "Average iteration is {0}, with sdt {1}".format(str(sum(iter_to_conv)/float(len(iter_to_conv))),str(np.std(np.asarray(iter_to_conv))))
return ks,found_kmer[ks][0]
def makeSTDGraphs(mismatch,iter):
fig,ax=plt.subplots()
seqs,kmer = generateSequences(250, 15, 10,mismatch)
print "THE ORIGINAL SEQUENCES"
print str("\n".join(seqs))
print "THE ORIGINAL KMER"
print kmer
print "THE NUMBER OF MISMATCHES ",mismatch
for config in configurations:
print makeSTDevGraph(seqs,len(kmer),ax,config,iter)
ax.legend(loc='upper right')
plt.xlabel('Iterations')
plt.ylabel('Motif Entropy')
plt.title("Motifs with {0} mismatch(es)".format(str(mismatch)))
plt.show()
def makeIterGraphs(mismatch,iter):
fig,ax=plt.subplots()
seqs,kmer = generateSequences(250, 15, 10, mismatch)
print "THE ORIGINAL SEQUENCES"
print str("\n".join(seqs))
print "THE ORIGINAL KMER"
print kmer
print "THE NUMBER OF MISMATCHES ",mismatch
for config in configurations:
print makeNumIterations(seqs,len(kmer),ax,config,iter,kmer)
ax.legend(loc='upper right')
plt.xlabel('Motif Entropy')
plt.ylabel('Count')
plt.title("Motifs with {0} mismatch(es)".format(str(mismatch)))
plt.show()
if __name__ == "__main__":
makeSTDGraphs(3,1000)
|
[
"doga.ulupinar@engineering.ucla.edu"
] |
doga.ulupinar@engineering.ucla.edu
|
8ff62fff0868018a0ce0127b70d20898d9581f9d
|
351b640723bb62c92f914f95ce880dfea6911d6d
|
/MediService/settings.py
|
ca797a35cccee58af8f3eb1f9552f573f254fbb6
|
[] |
no_license
|
Piplohasankamrul/MediServices
|
e768383e3f489349f7fa05111eb8cf9b53d0c04a
|
3aaf34d3559ab03c4b15c05bef4187b48681b5b8
|
refs/heads/master
| 2023-08-14T22:42:38.622169
| 2021-09-08T15:06:44
| 2021-09-08T15:06:44
| 358,980,199
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,811
|
py
|
"""
Django settings for MediService project.
Generated by 'django-admin startproject' using Django 3.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve(strict=True).parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'xs1#i!-i1v!*%=6x0!yoi=1r1!0m2o3&c=3ahgb8-y542-%8fn'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
# General use templates & template tags (should appear first)
'adminlte3',
# Optional: Django admin theme (must be before django.contrib.admin)
'adminlte3_theme',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'Accounts',
'crispy_forms',
'bookAppointment',
'medishop',
'MediSeApp',
'OrderApp',
'Product',
'mptt',
'widget_tweaks',
'multiselectfield',
'Ambulance',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'MediService.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'MediService.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'MediService1',
'USER': 'postgres',
'PASSWORD': '16961',
'HOST': 'Localhost',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
AUTH_USER_MODEL = 'Accounts.CustomUser'
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static')]
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR,'staticfiles')
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
|
[
"piplohasank@gmail.com"
] |
piplohasank@gmail.com
|
2beded6d1c8cf44b5bbe40dff3738ac2ee1e766d
|
cc2182f0126905969826710fd97f39b6af932a26
|
/libOSC.py
|
99b6d40c8edaf384e9454dd0836c54ac34eefd98
|
[] |
no_license
|
mizumasa/HydroPumper
|
e1da77123a755378d7453b481b0f888de105f085
|
450de0b371a8fbc484696b4cfb148c33b16d6945
|
refs/heads/master
| 2020-08-29T01:34:26.669865
| 2019-11-12T13:58:39
| 2019-11-12T13:58:39
| 217,881,778
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,811
|
py
|
""" sending OSC with pyOSC
https://trac.v2.nl/wiki/pyOSC
example by www.ixi-audio.net based on pyOSC documentation
"""
import OSC
import time, random
import os, sys
import threading
IP_HEAD = "192.168.1."
TAB_IPS =[
IP_HEAD+"1",
IP_HEAD+"2",
IP_HEAD+"3"
]
TAB_IPS =[
IP_HEAD+"11",
IP_HEAD+"12",
IP_HEAD+"13",
IP_HEAD+"14",
IP_HEAD+"15",
IP_HEAD+"16",
IP_HEAD+"17",
IP_HEAD+"18",
IP_HEAD+"19",
IP_HEAD+"20",
IP_HEAD+"21",
IP_HEAD+"22"
]
TAB_IPS =[
IP_HEAD+"14",
IP_HEAD+"12",
IP_HEAD+"22",
IP_HEAD+"18",
IP_HEAD+"15",
IP_HEAD+"17"
]
TAB_IPS2 =[
IP_HEAD+"101"
]
TAB_PORT = 12346
GUN_IPS =[
IP_HEAD+"5",
IP_HEAD+"6",
IP_HEAD+"7"
]
GUN_IPS =[
IP_HEAD+"11",
IP_HEAD+"12",
IP_HEAD+"13",
IP_HEAD+"14",
IP_HEAD+"15",
IP_HEAD+"16",
IP_HEAD+"17",
IP_HEAD+"18",
IP_HEAD+"19",
IP_HEAD+"20"
]
GUN_IPS =[
IP_HEAD+"40",
IP_HEAD+"41",
IP_HEAD+"42",
IP_HEAD+"43",
IP_HEAD+"44"
]
GUN_PORT = 12346
PC_IP = IP_HEAD+"50"
PC_PORT = 12345
PC_LOCAL_PORT = 12349
PC_LOCAL_IP = PC_IP
LOCAL_IP = '127.0.0.1'
class MY_OSC:
def __init__(self):
self.myIP = PC_IP
self.myPORT = PC_PORT
self.ip = ""
self.host = ""
self.dst = []
self.bArrived = False
self.recv = []
self.recvItem = []
self.localMode = False
self.debug = False
return
def setLocal(self):
self.myIP = LOCAL_IP
self.localMode = True
def setDebug(self):
self.debug = True
def client_sendto(self,msg,path):
try:
self.client.sendto(msg,path)
except:
print("send error",msg,path)
def sendStartAudio(self):
msg = self.makeMsg("/startaudio",[])
if self.debug:
print("Send start audio to ", PC_LOCAL_PORT, msg)
self.client_sendto(msg,(PC_LOCAL_IP, PC_LOCAL_PORT))
self.client_sendto(msg,(PC_LOCAL_IP, PC_LOCAL_PORT))
def sendAudioMode(self,iMode):
msg = self.makeMsg("/audio",[iMode])
if self.debug:
print("Send audio mode to ", PC_LOCAL_PORT, msg)
self.client_sendto(msg,(PC_LOCAL_IP, PC_LOCAL_PORT))
self.client_sendto(msg,(PC_LOCAL_IP, PC_LOCAL_PORT))
def sendAudioLevel(self,level):
msg = self.makeMsg("/audiolevel",[level])
if self.debug:
print("Send audio level to ", PC_LOCAL_PORT, msg)
self.client_sendto(msg,(PC_LOCAL_IP, PC_LOCAL_PORT))
self.client_sendto(msg,(PC_LOCAL_IP, PC_LOCAL_PORT))
def myMsgPrinter_handler(self, addr, tags, data, client_address):
print "osc://%server%server ->" % (OSC.getUrlStr(client_address), addr),
print "(tags, data): (%server, %server)" % (tags, data)
def myMsg_handler(self, addr, tags, data, client_address):
print "(tags, data): (%s, %s)" % (tags, data)
self.bArrived = True
self.recv.append([client_address,addr,data[0]])
def hit_handler(self, addr, tags, data, client_address):
print "(tags, data): (%s, %s)" % (tags, data)
print "hit !!!!!!!!!!!!!!!!!!!"
self.bArrived = True
self.recv.append([client_address,addr,data[0]])
def hititem_handler(self, addr, tags, data, client_address):
print "(tags, data): (%s, %s)" % (tags, data)
print "hit item!!!!!!!!!!!!!!!!!!!"
self.bArrived = True
self.recv.append([client_address,addr,data[0]])
self.recvItem.append([client_address,0])
def setup(self):
print("work as server",self.myIP,self.myPORT)
self.server = OSC.OSCServer((self.myIP,self.myPORT))
self.server.addDefaultHandlers()
self.server.addMsgHandler("/print", self.myMsgPrinter_handler)
self.server.addMsgHandler("/msg", self.myMsg_handler)
self.server.addMsgHandler("/hit", self.hit_handler)
self.server.addMsgHandler("/hititem", self.hititem_handler)
self.server_thread = threading.Thread(target=self.server.serve_forever)
self.server_thread.start()
self.client = OSC.OSCClient()
return
def clearRecv(self):
self.recv = []
self.recvItem = []
def get(self):
if self.bArrived:
ret = self.recv
self.recv = []
self.bArrived = False
return ret
return None
def kill(self):
try:
self.server.close()
print "server closed 1"
self.server_thread.join()
except:
print "first error"
try:
self.server.close()
print "server closed 2"
self.server_thread.join()
except:
print "seccond error"
pass
pass
def sendAll(self, msg, ips, port):
for i in ips:
try:
if self.debug:
print("Send to ",i,port,msg)
self.client_sendto(msg, (i, port))
except:
print "send error"
def makeMsg(self, address, content):
msg = OSC.OSCMessage(address)
for i in content:
msg.append(i)
return msg
def sendTab(self, address, content, idx):
msg = self.makeMsg(address,content)
if self.localMode:
if self.debug:
print("Send tab to ", LOCAL_IP, TAB_PORT, msg)
self.client_sendto(msg,(LOCAL_IP, TAB_PORT))
else:
self.sendAll(msg, [TAB_IPS[idx]], TAB_PORT)
return
def sendTabAll(self, address, content):
msg = self.makeMsg(address,content)
try:
if self.localMode:
if self.debug:
print("Send tab to ",LOCAL_IP, TAB_PORT, msg)
self.client_sendto(msg,(LOCAL_IP, TAB_PORT))
else:
self.sendAll(msg, TAB_IPS, TAB_PORT)
except:
print("sendTabAll error")
return
def sendGun(self, address, content, idx):
msg = self.makeMsg(address,content)
if self.localMode:
if self.debug:
print("Send gun to ",LOCAL_IP,msg)
self.client_sendto(msg,(LOCAL_IP, GUN_PORT))
else:
self.sendAll(msg, [GUN_IPS[idx]], GUN_PORT)
return
def sendGunAll(self, address, content):
msg = self.makeMsg(address,content)
if self.localMode:
if self.debug:
print("Send gun to ",LOCAL_IP,msg)
self.client_sendto(msg,(LOCAL_IP, GUN_PORT))
else:
self.sendAll(msg, GUN_IPS, GUN_PORT)
return
def main():
pass
if __name__=='__main__':
argvs=sys.argv
print argvs
main()
|
[
"masaru0mizuochi@gmail.com"
] |
masaru0mizuochi@gmail.com
|
2faea9a51fdfb40ad39aff8d81b01e9e7b3a715f
|
6864b967242eba8380fb087da7e1ad77786b27ac
|
/video_downloader.py
|
049ac907a1a105c62a0cb28ab9391b811cbd7cca
|
[] |
no_license
|
S4more/discord-drive
|
b875326b17551f301aeddd6086e1b112adbe85b6
|
888a8172361cc7c54caac58471c84c8ced2b5760
|
refs/heads/main
| 2023-03-13T11:29:40.667883
| 2021-04-03T21:27:34
| 2021-04-03T21:27:34
| 354,218,489
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 690
|
py
|
import youtube_dl
class VideoDownloader():
def download(self, link: str, title: str) -> str:
'''
Downloads a video and returns the extension of the downloaded video.
'''
ydl_opts = {'noplaylist': True, 'outtmpl': f"./imgs/{title}.%(ext)s"}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
info = ydl.extract_info(link)
try:
return info['ext']
except KeyError: # It could be facebook
return info['entries'][0]['ext']
if __name__ == '__main__':
vd = VideoDownloader()
print(vd.download("https://www.facebook.com/groups/1573953756236079/permalink/1585387575092697/", "aha"))
|
[
"guimsc@hotmail.com"
] |
guimsc@hotmail.com
|
76e68b2c58619f361a1344e8429350d3703fab75
|
5d49f51d30da3a8cf4d112619b9c75e8c01c884b
|
/sumSubsetRec.py
|
b0131497f20d6b2dd984e1c5add8fe98fcef1bbc
|
[] |
no_license
|
riyag283/Dynamic-Programming
|
43436262a89a3aa1c0e1390c985bc3556159ceae
|
5186afd863d31f121ed2845b45606e3e6eb4b46a
|
refs/heads/master
| 2022-12-29T23:34:20.699794
| 2020-10-14T14:57:57
| 2020-10-14T14:57:57
| 291,990,436
| 3
| 2
| null | 2020-10-14T14:57:58
| 2020-09-01T12:22:23
|
Python
|
UTF-8
|
Python
| false
| false
| 824
|
py
|
# Subset Sum Problem
# Recursive solution
'''
Given a set of non-negative integers, and a value sum,
determine if there is a subset of the given set with sum equal to given sum.
'''
def checkSubset(set,n,sum):
#base cases
if sum==0:
return True
if (n==0 and sum!=0):
return False
#if last element is greater than sum
if set[n-1]>sum:
return checkSubset(set,n-1,sum)
#else check if last element can be included in the sum or not
else:
return (checkSubset(set,n-1,sum-set[n-1]) or checkSubset(set,n-1,sum))
#driver code
n=int(input("Enter number of elements: "))
set = [int(input("Enter element: ")) for i in range(n)]
sum=int(input("Enter sum: "))
print(set)
if checkSubset(set,n,sum)==True:
print ("Yes, found.")
else:
print ("Sorry, not found.")
|
[
"noreply@github.com"
] |
riyag283.noreply@github.com
|
1b69748663c95321fecc50613a16f49a4ecc0cc7
|
64552b1a53ba1f09f88e861377fbc2cc362b47e3
|
/bangumiInfoEditor/bangumiGeneralInfoEnterButton.py
|
fe9bcac49050cc7a041be178e79897dae5144d76
|
[] |
no_license
|
HHHHhgqcdxhg/bangumiDesktopAssistant
|
736e399ac24f100ef31306f34df3a659525a04d6
|
67b8baa52c5f4ece01089f293871b995aa925040
|
refs/heads/master
| 2022-11-09T21:42:27.857873
| 2018-11-02T09:33:40
| 2018-11-02T09:33:40
| 172,485,424
| 2
| 1
| null | 2022-11-01T07:55:36
| 2019-02-25T10:32:14
|
Python
|
UTF-8
|
Python
| false
| false
| 4,228
|
py
|
from PyQt5.QtWidgets import QFrame, QLabel, QListWidget, QVBoxLayout, QPushButton, QGridLayout, QTextEdit, QComboBox,QMessageBox,QFileDialog
from PyQt5.QtCore import Qt
import datetime,json
from .makeChaptersInfo import BangumiChapters
from config import PATH
class CJsonEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, datetime.datetime):
return obj.strftime('%Y-%m-%d %H:%M:%S')
elif isinstance(obj, datetime.date):
return obj.strftime('%Y-%m-%d')
else:
return json.JSONEncoder.default(self, obj)
class BangumiGeneralInfoEnterButton(QPushButton):
def __init__(self, superEl):
self.superEl = superEl
super(BangumiGeneralInfoEnterButton, self).__init__()
self.setText("保存")
# self.mainLayout.addWidget(self.enterButton, 10, 0, 1, 7)
def mousePressEvent(self, e):
if (e.button() == Qt.LeftButton):
try:
self.superEl.lastSetPlatformData()
newData = {
"title": self.superEl.mainTitleEditor.toPlainText(),
"headImgSrc": self.superEl.headImagePathDisableLabel.text(),
"startChapter": int(self.superEl.firstUpdateChapterEditor.toPlainText()),
"startDate": self.superEl.startDateEditor.toPlainText(),
"finishDate": self.superEl.finalDateEditor.toPlainText(),
"updateTime": self.superEl.dayTimeEditor.toPlainText(),
"updateType": "weekly",
"updateDay": self.superEl.updateDayEditor.toPlainText(),
"platFormTargetUrls": self.superEl.data["platFormTargetUrls"],
"follow": True
}
if self.superEl.updateTypeComboBox.currentText() == "月更":
newData["updateType"] = "monthly"
if self.superEl.followComboBox.currentText() == "否":
newData["follow"] = False
if newData["updateType"] == "weekly":
if not newData["updateDay"] in ["周一","周二","周三","周四","周五","周六","周日"]:
QMessageBox.critical(self, "错误", "周更番的更新日请填周一到周日中的一个")
else:
try:
if not int(newData["updateDay"]) in range(1,31):
raise Exception()
except:
QMessageBox.critical(self, "错误", "月更番的更新日请填入1~31之间的数字!")
bangumiChapters = BangumiChapters(newData)
# self -> bangumiGeneralConfig -> rightColumn -> BangumiInfoEditorWindow
scheduleList = self.superEl.superEl.superEl.leftColumn.scheduleList
scheduleList.myCurrentItem.setText(bangumiChapters.title)
scheduleList.myCurrentItem.data = bangumiChapters
with open(f"{PATH}/src/db/bangumisInfo/{bangumiChapters.title}.json","w+",encoding="utf8") as f:
writeDict = bangumiChapters.makeDict()
json.dump(writeDict,f,ensure_ascii=False,cls=CJsonEncoder)
# print(self.superEl.superEl.superEl.leftColumn.scheduleList.myCurrentItem.text())
# self->generalconfig->RightColumn->BangumiInfoEditorWindow
try:
self.superEl.superEl.superEl.mainWindow.contentHolder.content.reloadChildren()
except:
pass
QMessageBox.about(self, "设置成功!", "设置成功!")
except Exception as e:
stre = str(e)
if stre == "noImage":
QMessageBox.critical(self, "错误", "请添加图片")
elif stre == "noTitle":
QMessageBox.critical(self, "错误", "标题不可以为新添番剧!")
elif stre == "titleerror":
QMessageBox.critical(self, "错误", "标题中不可以包含;.|*/:?\"<>!")
else:
QMessageBox.critical(self, "错误", "请正确填写且不能留空")
|
[
"2894700792@qq.com"
] |
2894700792@qq.com
|
3448e0efdb09db7afd788f27404d6fce2a0f312f
|
48b61a48b829f8511ff46ca761b5730519192229
|
/aircrafts/spiders/spiderUrl6.py
|
58bf8c02f49e7b16e7a078b401be371830d9cb7c
|
[] |
no_license
|
sirBarbouchi/aircrafts
|
ce32640cdf6aa13ac0ad8d1ea0139efc2bc51ec5
|
0741049edfb45ddce12799d39d3efe08cd3db134
|
refs/heads/master
| 2023-07-04T10:02:24.867518
| 2021-08-02T17:41:36
| 2021-08-02T17:41:36
| 392,025,588
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,139
|
py
|
import scrapy
from scrapy_splash import SplashRequest
from ..items import AircraftsItem
from scrapy.http import Request
import json
class BusinessairSpider(scrapy.Spider):
name = 'businessair'
def start_requests(self):
url = "https://www.businessair.com/jet-aircraft"
splash_args = {'timeout': 85, 'wait': 2.5}
yield SplashRequest(url=url, callback=self.parse_pages, args=splash_args, dont_filter=True)
def parse_pages(self, response):
splash_args = {'timeout': 85, 'wait': 2.5}
aircrafts = response.xpath('//a[text()="View »"]/@href').extract()
for aircraft in aircrafts:
make = aircraft.split('/')[2]
model = aircraft.split('/')[3]
source = "https://www.businessair.com"+aircraft
aircraftsItem = AircraftsItem()
aircraftsItem['source'] = source
aircraftsItem['make'] = make
aircraftsItem['model'] = model
#print("https://www.businessair.com"+aircraft)
yield SplashRequest(url="https://www.businessair.com"+aircraft, callback=self.parse, args=splash_args, dont_filter=True, meta={"aircraftsItem": aircraftsItem})
href = response.xpath('//a[text()="›"]/@href').extract_first()
if href:
next_url = "https://www.businessair.com" + href
yield SplashRequest(url=next_url, callback=self.parse_pages, args=splash_args, dont_filter=True)
def parse(self, response):
infos = response.css('.even::text').extract()
if len(infos)>2:
year = infos[1]
time = infos[2]
serial_number = infos[3]
price = infos[5]
dealer = response.css('.views-field-title-1 a::text').extract_first()
aircraftsItem = response.meta.get('aircraftsItem')
aircraftsItem['year'] = year
aircraftsItem['time'] = time
aircraftsItem['serial_Number'] = serial_number
aircraftsItem['price'] = price
aircraftsItem['dealer'] = dealer
yield aircraftsItem
|
[
"sirbarbouchi@gmail.com"
] |
sirbarbouchi@gmail.com
|
b8e1eb1ec0b16f2553edd815d9ba235dadd6d48c
|
41062dd5d20c3d69411f79755e937c0b6a739826
|
/tests/blueprints/slack/decorators_test.py
|
9147646579327f8548e841eaaa4a3ca665219e83
|
[
"MIT"
] |
permissive
|
Thornycrackers-Forks/busy-beaver
|
6dd7effda854b632f089fc5c8e991b42e83b940f
|
e47d481d217e137ecc2a9e450b9a101b508ce50f
|
refs/heads/master
| 2020-04-26T01:30:44.932497
| 2019-09-20T00:39:06
| 2019-09-20T00:39:06
| 173,206,575
| 0
| 0
|
MIT
| 2019-03-01T00:08:25
| 2019-03-01T00:08:25
| null |
UTF-8
|
Python
| false
| false
| 2,789
|
py
|
from flask import Flask, jsonify
import pytest
from busy_beaver.app import handle_http_error
from busy_beaver.blueprints.slack.decorators import verify_slack_signature
from busy_beaver.exceptions import UnverifiedWebhookRequest
SLACK_SIGNING_SECRET = "8f742231b10e8888abcd99yyyzzz85a5"
SLACK_SIGNATURE = "v0=a2114d57b48eac39b9ad189dd8316235a7b4a8d21a10bd27519666489c69b503"
@pytest.fixture(scope="module")
def slack_verification_app(app):
@app.route("/slack-only")
@verify_slack_signature(SLACK_SIGNING_SECRET)
def slack_only():
return jsonify({"authorization": "slack_endpoint"})
@app.route("/all-users")
def unlocked_endpoint():
return jsonify({"authorization": "all_users"})
app.register_error_handler(UnverifiedWebhookRequest, handle_http_error)
yield app
@pytest.fixture(scope="module")
def client(slack_verification_app):
yield slack_verification_app.test_client()
def test_unlocked_endpoint_success(client):
result = client.get("/all-users")
assert result.status_code == 200
def test_slack_verified_endpoint_failure_without_header(client):
result = client.get("/slack-only")
assert result.status_code == 401
def test_slack_verified_endpoint_failure_with_slack_signature_header(client):
result = client.get("/slack-only", headers={"X-Slack-Signature": SLACK_SIGNATURE})
assert result.status_code == 401
def test_slack_verified_endpoint_failure_without_body(client):
result = client.get(
"/slack-only",
headers={
"X-Slack-Signature": SLACK_SIGNATURE,
"X-Slack-Request-Timestamp": 1_531_420_618,
},
)
assert result.status_code == 401
def test_slack_verified_endpoint_success(client):
result = client.get(
"/slack-only",
headers={
"X-Slack-Signature": SLACK_SIGNATURE,
"X-Slack-Request-Timestamp": 1_531_420_618,
},
data=(
"token=xyzz0WbapA4vBCDEFasx0q6G&team_id=T1DC2JH3J&team_domain=testteamnow&"
"channel_id=G8PSS9T3V&channel_name=foobar&user_id=U2CERLKJA&"
"user_name=roadrunner&command=%2Fwebhook-collect&text=&"
"response_url=https%3A%2F%2Fhooks.slack.com%2Fcommands%2FT1DC2JH3J%2F3977"
"00885554%2F96rGlfmibIGlgcZRskXaIFfN&trigger_id=398738663015.47445629121.8"
"03a0bc887a14d10d2c447fce8b6703c"
),
)
assert result.status_code == 200
@pytest.mark.parametrize("x", [None, 31, [234], (1,), set()])
def test_slack_verification_decorator_raises_valueerror__signing_secret_env_not_set(x):
api = Flask(__name__)
with pytest.raises(ValueError):
@api.route("/auth")
@verify_slack_signature(x)
def auth():
return jsonify({"hello": "world!"})
|
[
"noreply@github.com"
] |
Thornycrackers-Forks.noreply@github.com
|
117c55bc7b712ce4809539178d9070e8ead67252
|
e160d3922c7032044c19f295c6fd0c30a2fff724
|
/supar/utils/transform.py
|
af629112217879eec90e5b2b8c9fbbd7be5c8b5f
|
[
"MIT"
] |
permissive
|
ck624/parser-1
|
496f6979d258b3f1baf71fa62591325c0e3f7204
|
d7b6ae5498bd045e34c1e1c55ab8e619cf4ad353
|
refs/heads/main
| 2023-01-12T23:52:59.884118
| 2020-11-24T05:59:57
| 2020-11-24T05:59:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 24,459
|
py
|
# -*- coding: utf-8 -*-
from collections.abc import Iterable
import nltk
from supar.utils.logging import get_logger, progress_bar
logger = get_logger(__name__)
class Transform(object):
r"""
A Transform object corresponds to a specific data format.
It holds several instances of data fields that provide instructions for preprocessing and numericalizing, etc.
Attributes:
training (bool):
Sets the object in training mode.
If ``False``, some data fields not required for predictions won't be returned.
Default: ``True``.
"""
fields = []
def __init__(self):
self.training = True
def __call__(self, sentences):
pairs = dict()
for field in self:
if field not in self.src and field not in self.tgt:
continue
if not self.training and field in self.tgt:
continue
if not isinstance(field, Iterable):
field = [field]
for f in field:
if f is not None:
pairs[f] = f.transform([getattr(i, f.name) for i in sentences])
return pairs
def __getitem__(self, index):
return getattr(self, self.fields[index])
def train(self, training=True):
self.training = training
def eval(self):
self.train(False)
def append(self, field):
self.fields.append(field.name)
setattr(self, field.name, field)
@property
def src(self):
raise AttributeError
@property
def tgt(self):
raise AttributeError
def save(self, path, sentences):
with open(path, 'w') as f:
f.write('\n'.join([str(i) for i in sentences]) + '\n')
class Sentence(object):
r"""
A Sentence object holds a sentence with regard to specific data format.
"""
def __init__(self, transform):
self.transform = transform
# mapping from each nested field to their proper position
self.maps = dict()
# names of each field
self.keys = set()
# values of each position
self.values = []
for i, field in enumerate(self.transform):
if not isinstance(field, Iterable):
field = [field]
for f in field:
if f is not None:
self.maps[f.name] = i
self.keys.add(f.name)
def __len__(self):
return len(self.values[0])
def __contains__(self, key):
return key in self.keys
def __getattr__(self, name):
if name in self.__dict__:
return self.__dict__[name]
else:
return self.values[self.maps[name]]
def __setattr__(self, name, value):
if 'keys' in self.__dict__ and name in self:
index = self.maps[name]
if index >= len(self.values):
self.__dict__[name] = value
else:
self.values[index] = value
else:
self.__dict__[name] = value
def __getstate__(self):
return vars(self)
def __setstate__(self, state):
self.__dict__.update(state)
class CoNLL(Transform):
    r"""
    The CoNLL object holds ten fields required for CoNLL-X data format.
    Each field can be bound with one or more :class:`Field` objects. For example,
    ``FORM`` can contain both :class:`Field` and :class:`SubwordField` to produce tensors for words and subwords.

    Attributes:
        ID:
            Token counter, starting at 1.
        FORM:
            Words in the sentence.
        LEMMA:
            Lemmas or stems (depending on the particular treebank) of words, or underscores if not available.
        CPOS:
            Coarse-grained part-of-speech tags, where the tagset depends on the treebank.
        POS:
            Fine-grained part-of-speech tags, where the tagset depends on the treebank.
        FEATS:
            Unordered set of syntactic and/or morphological features (depending on the particular treebank),
            or underscores if not available.
        HEAD:
            Heads of the tokens, which are either values of ID or zeros.
        DEPREL:
            Dependency relations to the HEAD.
        PHEAD:
            Projective heads of tokens, which are either values of ID or zeros, or underscores if not available.
        PDEPREL:
            Dependency relations to the PHEAD, or underscores if not available.

    References:
        - Sabine Buchholz and Erwin Marsi. 2006.
          `CoNLL-X Shared Task on Multilingual Dependency Parsing`_.

    .. _CoNLL-X Shared Task on Multilingual Dependency Parsing:
        https://www.aclweb.org/anthology/W06-2920/
    """

    # Fixed CoNLL-X column order; names double as attribute names.
    fields = ['ID', 'FORM', 'LEMMA', 'CPOS', 'POS', 'FEATS', 'HEAD', 'DEPREL', 'PHEAD', 'PDEPREL']

    def __init__(self,
                 ID=None, FORM=None, LEMMA=None, CPOS=None, POS=None,
                 FEATS=None, HEAD=None, DEPREL=None, PHEAD=None, PDEPREL=None):
        """Bind zero or more field objects to each of the ten CoNLL-X columns."""
        super().__init__()

        self.ID = ID
        self.FORM = FORM
        self.LEMMA = LEMMA
        self.CPOS = CPOS
        self.POS = POS
        self.FEATS = FEATS
        self.HEAD = HEAD
        self.DEPREL = DEPREL
        self.PHEAD = PHEAD
        self.PDEPREL = PDEPREL

    @property
    def src(self):
        # Model input: word forms and coarse POS tags.
        return self.FORM, self.CPOS

    @property
    def tgt(self):
        # Prediction targets: head indices and dependency relations.
        return self.HEAD, self.DEPREL

    @classmethod
    def get_arcs(cls, sequence):
        # Parse head indices out of their string representations.
        return [int(i) for i in sequence]

    @classmethod
    def get_sibs(cls, sequence):
        # For each token, record a token that shares the same head on the
        # same side (a sibling); -1 marks "no sibling found".
        sibs = [-1] * (len(sequence) + 1)
        heads = [0] + [int(i) for i in sequence]

        for i in range(1, len(heads)):
            hi = heads[i]
            for j in range(i + 1, len(heads)):
                hj = heads[j]
                # di/dj > 0 together with di*dj > 0 means i and j sit on the
                # same side of their (shared) head.
                di, dj = hi - i, hj - j
                if hi >= 0 and hj >= 0 and hi == hj and di * dj > 0:
                    if abs(di) > abs(dj):
                        sibs[i] = j
                    else:
                        sibs[j] = i
                    # Only the first matching pair per i is kept.
                    break
        return sibs[1:]

    @classmethod
    def toconll(cls, tokens):
        r"""
        Converts a list of tokens to a string in CoNLL-X format.
        Missing fields are filled with underscores.

        Args:
            tokens (list[str] or list[tuple]):
                This can be either a list of words or word/pos pairs.

        Returns:
            A string in CoNLL-X format.

        Examples:
            >>> print(CoNLL.toconll(['She', 'enjoys', 'playing', 'tennis', '.']))
            1   She     _       _       _       _       _       _       _       _
            2   enjoys  _       _       _       _       _       _       _       _
            3   playing _       _       _       _       _       _       _       _
            4   tennis  _       _       _       _       _       _       _       _
            5   .       _       _       _       _       _       _       _       _
        """
        if isinstance(tokens[0], str):
            # Words only: fill the remaining 8 columns with underscores.
            s = '\n'.join([f"{i}\t{word}\t" + '\t'.join(['_']*8)
                           for i, word in enumerate(tokens, 1)])
        else:
            # Word/tag pairs: tag goes into the CPOS column.
            s = '\n'.join([f"{i}\t{word}\t_\t{tag}\t" + '\t'.join(['_']*6)
                           for i, (word, tag) in enumerate(tokens, 1)])
        return s + '\n'

    @classmethod
    def isprojective(cls, sequence):
        r"""
        Checks if a dependency tree is projective.
        This also works for partial annotation.

        Besides the obvious crossing arcs, the examples below illustrate two non-projective cases
        which are hard to detect in the scenario of partial annotation.

        Args:
            sequence (list[int]):
                A list of head indices.

        Returns:
            ``True`` if the tree is projective, ``False`` otherwise.

        Examples:
            >>> CoNLL.isprojective([2, -1, 1])  # -1 denotes un-annotated cases
            False
            >>> CoNLL.isprojective([3, -1, 2])
            False
        """
        # Only annotated arcs (head >= 0) participate.
        pairs = [(h, d) for d, h in enumerate(sequence, 1) if h >= 0]
        for i, (hi, di) in enumerate(pairs):
            for hj, dj in pairs[i+1:]:
                (li, ri), (lj, rj) = sorted([hi, di]), sorted([hj, dj])
                # Partial-annotation cases: a head falling inside another arc.
                if li <= hj <= ri and hi == dj:
                    return False
                if lj <= hi <= rj and hj == di:
                    return False
                # Classic crossing-arcs test.
                if (li < lj < ri or li < rj < ri) and (li - lj)*(ri - rj) > 0:
                    return False
        return True

    @classmethod
    def istree(cls, sequence, proj=False, multiroot=False):
        r"""
        Checks if the arcs form a valid dependency tree.

        Args:
            sequence (list[int]):
                A list of head indices.
            proj (bool):
                If ``True``, requires the tree to be projective. Default: ``False``.
            multiroot (bool):
                If ``False``, requires the tree to contain only a single root. Default: ``False``.

        Returns:
            ``True`` if the arcs form a valid tree, ``False`` otherwise.

        Examples:
            >>> CoNLL.istree([3, 0, 0, 3], multiroot=True)
            True
            >>> CoNLL.istree([3, 0, 0, 3], proj=True)
            False
        """
        from supar.utils.alg import tarjan
        if proj and not cls.isprojective(sequence):
            return False
        # A tree must have at least one root (head == 0).
        n_roots = sum(head == 0 for head in sequence)
        if n_roots == 0:
            return False
        if not multiroot and n_roots > 1:
            return False
        # No self-loops allowed.
        if any(i == head for i, head in enumerate(sequence, 1)):
            return False
        # tarjan yields strongly connected components, i.e. cycles.
        return next(tarjan(sequence), None) is None

    def load(self, data, proj=False, max_len=None, **kwargs):
        r"""
        Loads the data in CoNLL-X format.
        Also supports for loading data from CoNLL-U file with comments and non-integer IDs.

        Args:
            data (list[list] or str):
                A list of instances or a filename.
            proj (bool):
                If ``True``, discards all non-projective sentences. Default: ``False``.
            max_len (int):
                Sentences exceeding the length will be discarded. Default: ``None``.

        Returns:
            A list of :class:`CoNLLSentence` instances.
        """
        if isinstance(data, str):
            with open(data, 'r') as f:
                lines = [line.strip() for line in f]
        else:
            # Raw token lists are first rendered into CoNLL-X lines.
            data = [data] if isinstance(data[0], str) else data
            lines = '\n'.join([self.toconll(i) for i in data]).split('\n')

        # Sentences are delimited by blank lines.
        # NOTE(review): a final sentence without a trailing blank line would
        # be dropped; inputs are expected to end with one — confirm.
        i, start, sentences = 0, 0, []
        for line in progress_bar(lines, leave=False):
            if not line:
                sentences.append(CoNLLSentence(self, lines[start:i]))
                start = i + 1
            i += 1
        if proj:
            sentences = [i for i in sentences if self.isprojective(list(map(int, i.arcs)))]
        if max_len is not None:
            sentences = [i for i in sentences if len(i) < max_len]

        return sentences
class CoNLLSentence(Sentence):
    r"""
    Sentence in CoNLL-X format.

    Args:
        transform (CoNLL):
            A :class:`CoNLL` object.
        lines (list[str]):
            A list of strings composing a sentence in CoNLL-X format.
            Comments and non-integer IDs are permitted.

    Examples:
        >>> lines = ['# text = But I found the location wonderful and the neighbors very kind.',
                     '1\tBut\t_\t_\t_\t_\t_\t_\t_\t_',
                     '2\tI\t_\t_\t_\t_\t_\t_\t_\t_',
                     '3\tfound\t_\t_\t_\t_\t_\t_\t_\t_',
                     '4\tthe\t_\t_\t_\t_\t_\t_\t_\t_',
                     '5\tlocation\t_\t_\t_\t_\t_\t_\t_\t_',
                     '6\twonderful\t_\t_\t_\t_\t_\t_\t_\t_',
                     '7\tand\t_\t_\t_\t_\t_\t_\t_\t_',
                     '7.1\tfound\t_\t_\t_\t_\t_\t_\t_\t_',
                     '8\tthe\t_\t_\t_\t_\t_\t_\t_\t_',
                     '9\tneighbors\t_\t_\t_\t_\t_\t_\t_\t_',
                     '10\tvery\t_\t_\t_\t_\t_\t_\t_\t_',
                     '11\tkind\t_\t_\t_\t_\t_\t_\t_\t_',
                     '12\t.\t_\t_\t_\t_\t_\t_\t_\t_']
        >>> sentence = CoNLLSentence(transform, lines)  # fields in transform are built from ptb.
        >>> sentence.arcs = [3, 3, 0, 5, 6, 3, 6, 9, 11, 11, 6, 3]
        >>> sentence.rels = ['cc', 'nsubj', 'root', 'det', 'nsubj', 'xcomp',
                             'cc', 'det', 'dep', 'advmod', 'conj', 'punct']
        >>> sentence
        # text = But I found the location wonderful and the neighbors very kind.
        1       But     _       _       _       _       3       cc      _       _
        2       I       _       _       _       _       3       nsubj   _       _
        3       found   _       _       _       _       0       root    _       _
        4       the     _       _       _       _       5       det     _       _
        5       location        _       _       _       _       6       nsubj   _       _
        6       wonderful       _       _       _       _       3       xcomp   _       _
        7       and     _       _       _       _       6       cc      _       _
        7.1     found   _       _       _       _       _       _       _       _
        8       the     _       _       _       _       9       det     _       _
        9       neighbors       _       _       _       _       11      dep     _       _
        10      very    _       _       _       _       11      advmod  _       _
        11      kind    _       _       _       _       6       conj    _       _
        12      .       _       _       _       _       3       punct   _       _
    """

    def __init__(self, transform, lines):
        super().__init__(transform)

        self.values = []
        # record annotations for post-recovery
        self.annotations = dict()

        for i, line in enumerate(lines):
            value = line.split('\t')
            if value[0].startswith('#') or not value[0].isdigit():
                # Comment lines and non-integer IDs (e.g. '7.1') are kept
                # verbatim; negative keys never collide with token positions.
                self.annotations[-i-1] = line
            else:
                # Token rows are keyed by their position among real tokens.
                self.annotations[len(self.values)] = line
                self.values.append(value)
        # Transpose token rows into per-field columns.
        self.values = list(zip(*self.values))

    def __repr__(self):
        # cover the raw lines: annotation lines first, then the (possibly
        # updated) token rows re-rendered from ``values``.
        merged = {**self.annotations,
                  **{i: '\t'.join(map(str, line))
                     for i, line in enumerate(zip(*self.values))}}
        return '\n'.join(merged.values()) + '\n'
class Tree(Transform):
    r"""
    The Tree object factorizes a constituency tree into four fields, each associated with one or more :class:`Field` objects.

    Attributes:
        WORD:
            Words in the sentence.
        POS:
            Part-of-speech tags, or underscores if not available.
        TREE:
            The raw constituency tree in :class:`nltk.tree.Tree` format.
        CHART:
            The factorized sequence of binarized tree traversed in pre-order.
    """

    # Root label used when building trees from raw tokens; load() replaces
    # it with the root label of the first tree read from file.
    root = ''
    fields = ['WORD', 'POS', 'TREE', 'CHART']

    def __init__(self, WORD=None, POS=None, TREE=None, CHART=None):
        """Bind zero or more field objects to each of the four tree slots."""
        super().__init__()

        self.WORD = WORD
        self.POS = POS
        self.TREE = TREE
        self.CHART = CHART

    @property
    def src(self):
        # Model input: words, tags and the raw tree.
        return self.WORD, self.POS, self.TREE

    @property
    def tgt(self):
        # Prediction target: the factorized chart (note the 1-tuple).
        return self.CHART,

    @classmethod
    def totree(cls, tokens, root=''):
        r"""
        Converts a list of tokens to a :class:`nltk.tree.Tree`.
        Missing fields are filled with underscores.

        Args:
            tokens (list[str] or list[tuple]):
                This can be either a list of words or word/pos pairs.
            root (str):
                The root label of the tree. Default: ''.

        Returns:
            A :class:`nltk.tree.Tree` object.

        Examples:
            >>> print(Tree.totree(['She', 'enjoys', 'playing', 'tennis', '.'], 'TOP'))
            (TOP (_ She) (_ enjoys) (_ playing) (_ tennis) (_ .))
        """
        if isinstance(tokens[0], str):
            # Words only: use '_' as the placeholder POS tag.
            tokens = [(token, '_') for token in tokens]
        tree = ' '.join([f"({pos} {word})" for word, pos in tokens])
        return nltk.Tree.fromstring(f"({root} {tree})")

    @classmethod
    def binarize(cls, tree):
        r"""
        Conducts binarization over the tree.

        First, the tree is transformed to satisfy `Chomsky Normal Form (CNF)`_.
        Here we call :meth:`~nltk.tree.Tree.chomsky_normal_form` to conduct left-binarization.
        Second, all unary productions in the tree are collapsed.

        Args:
            tree (nltk.tree.Tree):
                The tree to be binarized.

        Returns:
            The binarized tree.

        Examples:
            >>> tree = nltk.Tree.fromstring('''
                                            (TOP
                                              (S
                                                (NP (_ She))
                                                (VP (_ enjoys) (S (VP (_ playing) (NP (_ tennis)))))
                                                (_ .)))
                                            ''')
            >>> print(Tree.binarize(tree))
            (TOP
              (S
                (S|<>
                  (NP (_ She))
                  (VP
                    (VP|<> (_ enjoys))
                    (S+VP (VP|<> (_ playing)) (NP (_ tennis)))))
                (S|<> (_ .))))

        .. _Chomsky Normal Form (CNF):
            https://en.wikipedia.org/wiki/Chomsky_normal_form
        """
        # Work on a deep copy; the caller's tree is left untouched.
        tree = tree.copy(True)
        nodes = [tree]
        while nodes:
            node = nodes.pop()
            if isinstance(node, nltk.Tree):
                nodes.extend([child for child in node])
                if len(node) > 1:
                    # Wrap bare terminals of n-ary nodes in auxiliary
                    # ``label|<>`` unary nodes before CNF conversion.
                    for i, child in enumerate(node):
                        if not isinstance(child[0], nltk.Tree):
                            node[i] = nltk.Tree(f"{node.label()}|<>", [child])
        tree.chomsky_normal_form('left', 0, 0)
        tree.collapse_unary()

        return tree

    @classmethod
    def factorize(cls, tree, delete_labels=None, equal_labels=None):
        r"""
        Factorizes the tree into a sequence.
        The tree is traversed in pre-order.

        Args:
            tree (nltk.tree.Tree):
                The tree to be factorized.
            delete_labels (set[str]):
                A set of labels to be ignored. This is used for evaluation.
                If it is a pre-terminal label, delete the word along with the brackets.
                If it is a non-terminal label, just delete the brackets (don't delete children).
                In `EVALB`_, the default set is:
                {'TOP', 'S1', '-NONE-', ',', ':', '``', "''", '.', '?', '!', ''}
                Default: ``None``.
            equal_labels (dict[str, str]):
                The key-val pairs in the dict are considered equivalent (non-directional). This is used for evaluation.
                The default dict defined in `EVALB`_ is: {'ADVP': 'PRT'}
                Default: ``None``.

        Returns:
            The sequence of the factorized tree.

        Examples:
            >>> tree = nltk.Tree.fromstring('''
                                            (TOP
                                              (S
                                                (NP (_ She))
                                                (VP (_ enjoys) (S (VP (_ playing) (NP (_ tennis)))))
                                                (_ .)))
                                            ''')
            >>> Tree.factorize(tree)
            [(0, 5, 'TOP'), (0, 5, 'S'), (0, 1, 'NP'), (1, 4, 'VP'), (2, 4, 'S'), (2, 4, 'VP'), (3, 4, 'NP')]
            >>> Tree.factorize(tree, delete_labels={'TOP', 'S1', '-NONE-', ',', ':', '``', "''", '.', '?', '!', ''})
            [(0, 5, 'S'), (0, 1, 'NP'), (1, 4, 'VP'), (2, 4, 'S'), (2, 4, 'VP'), (3, 4, 'NP')]

        .. _EVALB:
            https://nlp.cs.nyu.edu/evalb/
        """
        def track(tree, i):
            # Returns (next fencepost position, spans found in this subtree).
            label = tree.label()
            if delete_labels is not None and label in delete_labels:
                label = None
            if equal_labels is not None:
                label = equal_labels.get(label, label)
            if len(tree) == 1 and not isinstance(tree[0], nltk.Tree):
                # Pre-terminal: consume one token unless its label is deleted.
                return (i+1 if label is not None else i), []
            j, spans = i, []
            for child in tree:
                j, s = track(child, j)
                spans += s
            if label is not None and j > i:
                # Pre-order: the parent span precedes its children's spans.
                spans = [(i, j, label)] + spans
            return j, spans
        return track(tree, 0)[1]

    @classmethod
    def build(cls, tree, sequence):
        r"""
        Builds a constituency tree from the sequence. The sequence is generated in pre-order.
        During building the tree, the sequence is de-binarized to the original format (i.e.,
        the suffixes ``|<>`` are ignored, the collapsed labels are recovered).

        Args:
            tree (nltk.tree.Tree):
                An empty tree that provides a base for building a result tree.
            sequence (list[tuple]):
                A list of tuples used for generating a tree.
                Each tuple consists of the indices of left/right span boundaries and label of the span.

        Returns:
            A result constituency tree.

        Examples:
            >>> tree = Tree.totree(['She', 'enjoys', 'playing', 'tennis', '.'], 'TOP')
            >>> sequence = [(0, 5, 'S'), (0, 4, 'S|<>'), (0, 1, 'NP'), (1, 4, 'VP'), (1, 2, 'VP|<>'),
                            (2, 4, 'S+VP'), (2, 3, 'VP|<>'), (3, 4, 'NP'), (4, 5, 'S|<>')]
            >>> print(Tree.build(tree, sequence))
            (TOP
              (S
                (NP (_ She))
                (VP (_ enjoys) (S (VP (_ playing) (NP (_ tennis)))))
                (_ .)))
        """
        root = tree.label()
        # Pre-terminals of the base tree supply the leaves in order.
        leaves = [subtree for subtree in tree.subtrees()
                  if not isinstance(subtree[0], nltk.Tree)]

        def track(node):
            # Consumes one span and, recursively, its two children (the
            # sequence comes from a binarized tree in pre-order).
            i, j, label = next(node)
            if j == i+1:
                children = [leaves[i]]
            else:
                children = track(node) + track(node)
            if label.endswith('|<>'):
                # Auxiliary binarization node: splice children into parent.
                return children
            # '+' joins labels of collapsed unary chains; rebuild the chain.
            labels = label.split('+')
            tree = nltk.Tree(labels[-1], children)
            for label in reversed(labels[:-1]):
                tree = nltk.Tree(label, [tree])
            return [tree]
        return nltk.Tree(root, track(iter(sequence)))

    def load(self, data, max_len=None, **kwargs):
        r"""
        Args:
            data (list[list] or str):
                A list of instances or a filename.
            max_len (int):
                Sentences exceeding the length will be discarded. Default: ``None``.

        Returns:
            A list of :class:`TreeSentence` instances.
        """
        if isinstance(data, str):
            with open(data, 'r') as f:
                trees = [nltk.Tree.fromstring(string) for string in f]
            # Adopt the root label of the corpus being loaded.
            self.root = trees[0].label()
        else:
            data = [data] if isinstance(data[0], str) else data
            trees = [self.totree(i, self.root) for i in data]

        # NOTE(review): ``i`` is incremented but never read; harmless leftover.
        i, sentences = 0, []
        for tree in progress_bar(trees, leave=False):
            # Skip degenerate single-pre-terminal trees.
            if len(tree) == 1 and not isinstance(tree[0][0], nltk.Tree):
                continue
            sentences.append(TreeSentence(self, tree))
            i += 1
        if max_len is not None:
            sentences = [i for i in sentences if len(i) < max_len]

        return sentences
class TreeSentence(Sentence):
    r"""
    A single sentence paired with its constituency tree.

    Args:
        transform (Tree):
            A :class:`Tree` object.
        tree (nltk.tree.Tree):
            A :class:`nltk.tree.Tree` object.
    """

    def __init__(self, transform, tree):
        super().__init__(transform)

        tagged = tree.pos()
        words, tags = zip(*tagged)
        n = len(words)
        # (n+1) x (n+1) chart of span labels, indexed by fencepost positions.
        chart = [[None] * (n + 1) for _ in range(n + 1)]
        spans = Tree.factorize(Tree.binarize(tree)[0])
        for left, right, label in spans:
            chart[left][right] = label
        self.values = [words, tags, tree, chart]

    def __repr__(self):
        # Render the raw tree on a single (very wide) line.
        return self.values[-2].pformat(1000000)
|
[
"yzhang.cs@outlook.com"
] |
yzhang.cs@outlook.com
|
9b442ee2dff0befcb2ce79b6bb7765597d3657ea
|
b6c4c5d46635097956d5ef847027b9927be55dd0
|
/django/main/apps/semirestfulusers/views.py
|
7772a4aaa7a7b287c2c5cac41d3919c19851289f
|
[] |
no_license
|
neelanqj/python_stack
|
3938c6ff5faf37be07a7c0ba13b598bcb1fa56d6
|
4370f0eb53dab60fc2cdea5720a939aa9f5a4fb3
|
refs/heads/master
| 2020-04-20T09:36:48.190615
| 2019-02-01T22:35:19
| 2019-02-01T22:35:19
| 168,769,204
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,667
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render, HttpResponse, redirect
from models import User
# Create your views here.
def index(request):
    """Render the user list page with all users in the database."""
    all_users = list(User.objects.all())
    for user in all_users:
        # Debug output. Fixed: the Python 2 print statement
        # (`print user.first_name`) is a syntax error on Python 3; the
        # call form below works on both interpreters.
        print(user.first_name)
    content = {
        'users': all_users
    }
    return render(request, "semirestfulusers/index.html", content)
def updateRender(request, user):
    """Render the edit form pre-filled with the selected user's data."""
    user_obj = User.objects.get(id=user)
    content = {
        'id': user_obj.id,
        'first_name': user_obj.first_name,
        'last_name': user_obj.last_name,
        'email_address': user_obj.email_address,
        'created_at': user_obj.created_at,
    }
    return render(request, "semirestfulusers/update.html", content)
def update(request, user):
    """Apply the posted edits to the user identified by the URL, then redirect.

    Args:
        request: The incoming HTTP request carrying the POSTed form fields.
        user: The user id captured from the URL.
    """
    user_obj = User.objects.get(id=user)
    user_obj.first_name = request.POST['first_name']
    user_obj.last_name = request.POST['last_name']
    user_obj.email_address = request.POST['email_address']
    user_obj.save()
    # Fixed: redirect using the URL parameter instead of a hidden POST
    # field ('id') — avoids a KeyError when the form omits it and cannot
    # be spoofed to redirect elsewhere.
    return redirect('/users/' + str(user))
def createRender(request):
    """Render the empty 'new user' form."""
    return render(request, "semirestfulusers/create.html")
def create(request):
    """Create a user from the posted form fields and return to the index."""
    posted = request.POST
    User.objects.create(
        first_name=posted['first_name'],
        last_name=posted['last_name'],
        email_address=posted['email_address'],
    )
    return redirect('/')
def destroy(request, user):
    """Delete the user with the given id (no-op if absent), then go home."""
    matching = User.objects.filter(id=user)
    matching.delete()
    return redirect('/')
def read(request, user):
    """Render the detail page for a single user."""
    user_obj = User.objects.get(id=user)
    field_names = ('id', 'first_name', 'last_name', 'email_address', 'created_at')
    content = {name: getattr(user_obj, name) for name in field_names}
    return render(request, "semirestfulusers/read.html", content)
|
[
"neelan.joachimpillai@gmail.com"
] |
neelan.joachimpillai@gmail.com
|
a88ceda775e2945e06f7a308de2d0c24da99c175
|
987af84a0628a1eb4f9547b1a0c437f1a502789b
|
/donation/models.py
|
486c91b1fff4ce6034a848cfafe5221ff7b7dd26
|
[] |
no_license
|
aaquib19/bookCafe
|
fde2012a558457097ded759c80c95d48c5793fbf
|
58a0e39a2158ef0af0ee218b65e85366b809a2cf
|
refs/heads/master
| 2022-12-16T22:45:10.049570
| 2020-07-06T15:58:39
| 2020-07-06T15:58:39
| 154,096,263
| 0
| 0
| null | 2022-11-22T03:18:34
| 2018-10-22T06:26:06
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 256
|
py
|
from django.db import models
# Create your models here.
class donation(models.Model):
    """A book donation offer: donor name, contact email and book title."""
    # NOTE(review): class name violates PEP 8 PascalCase (should be
    # ``Donation``); left unchanged since renaming requires a migration and
    # breaks existing references. ``emaild`` also looks like a typo for
    # ``emailid``/``email`` — confirm before renaming (same migration cost).
    # Donor's name; optional.
    name = models.CharField(max_length=255,null=True,blank=True)
    # Donor's contact email; required.
    emaild = models.EmailField(max_length=255)
    # Title of the donated book; optional.
    bookname=models.CharField(max_length=255,null=True,blank=True)
|
[
"aaquibniaz3600@gmail.com"
] |
aaquibniaz3600@gmail.com
|
d8b3dbba1e334160749255b63228fa5c84fbf3c8
|
72d57dfcda2bce8fabfbe49573cb289c666b83e5
|
/SistemaVenda/settings.py
|
5334ffb18d969026357c186ac0efc8881975ea67
|
[] |
no_license
|
joanerocha/Venda
|
2ae7184344b73703cf7e14fdbe2516ee5fa9c192
|
bcb87f42e0a2edd7ef95fb042449131652e447f8
|
refs/heads/master
| 2021-01-20T16:16:56.236536
| 2016-05-27T22:32:50
| 2016-05-27T22:32:50
| 59,840,329
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,320
|
py
|
"""
Django settings for SistemaVenda project.
Generated by 'django-admin startproject' using Django 1.9.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = ')8v^29=%80a6ak)$2u(ej3vz((%_uk9wc8@lzg)*z!*1aznijb'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'joane',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'SistemaVenda.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'SistemaVenda.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'pt-br'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
STATIC_ROOT = os.path.join(BASE_DIR,'static')
MEDIA_ROOT = os.path.join(BASE_DIR,'media')
|
[
"joane.goncalo@hotmail.com"
] |
joane.goncalo@hotmail.com
|
34689d565a2134ee25dc5da17e94a9d70ff3e3c0
|
0087deff0374c9eff8c54f54d47e915e71332478
|
/nsot/migrations/0037_auto_20171006_0914.py
|
5138fe7ccd8816ccbeb293bed690da2657dc17fb
|
[
"Apache-2.0"
] |
permissive
|
dirtyonekanobi/nsot
|
d6dab1ec613311a474002d927fc6530f0f2b3c86
|
96f7d02dd0bbe8c69f4807cae96c56729cb597f5
|
refs/heads/develop
| 2021-01-19T15:51:06.639724
| 2017-10-06T19:23:17
| 2017-10-06T19:23:17
| 88,233,293
| 0
| 0
| null | 2017-04-14T04:38:04
| 2017-04-14T04:38:04
| null |
UTF-8
|
Python
| false
| false
| 1,142
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Re-declare ``resource_name`` on ``Change`` and ``Value`` with the
    choice set (Network, Attribute, Site, Interface, Circuit, Device,
    Iterable). Auto-generated by ``makemigrations``."""

    dependencies = [
        ('nsot', '0036_auto_20171006_0118'),
    ]

    operations = [
        migrations.AlterField(
            model_name='change',
            name='resource_name',
            field=models.CharField(help_text='The name of the Resource for this Change.', max_length=20, verbose_name='Resource Type', db_index=True, choices=[('Network', 'Network'), ('Attribute', 'Attribute'), ('Site', 'Site'), ('Interface', 'Interface'), ('Circuit', 'Circuit'), ('Device', 'Device'), ('Iterable', 'Iterable')]),
        ),
        migrations.AlterField(
            model_name='value',
            name='resource_name',
            field=models.CharField(help_text='The name of the Resource type to which the Value is bound.', max_length=20, verbose_name='Resource Type', db_index=True, choices=[('Network', 'Network'), ('Attribute', 'Attribute'), ('Site', 'Site'), ('Interface', 'Interface'), ('Circuit', 'Circuit'), ('Device', 'Device'), ('Iterable', 'Iterable')]),
        ),
    ]
|
[
"daryn.johnson@ge.com"
] |
daryn.johnson@ge.com
|
ce0760bbd45cc43bbaa44f93c161b99678124989
|
9561c9c0636e02fcc79b4430de20cdf707a16863
|
/application/wsgi.py
|
85a254c80238113f3f21859584ba75b72ace35db
|
[] |
no_license
|
rohan-amazon/Jugnoo
|
53cb123be51fcd1ef8c9a77834b2468aa34c3d9e
|
27a1753705164ca6951f75af09b3fe1d2ae4390a
|
refs/heads/master
| 2021-06-18T02:48:13.032179
| 2017-06-15T12:04:15
| 2017-06-15T12:04:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 416
|
py
|
"""
WSGI config for application project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "application.settings")
application = get_wsgi_application()
|
[
"noreply@github.com"
] |
rohan-amazon.noreply@github.com
|
90be38ec9b9246e9e6f07b27d4acfd2fe672c1b5
|
1c31598ee93726e400a3e0793d369d6d20e92355
|
/colors.py
|
43cbfa10cde7984e70a5834ea7a775648fa46e46
|
[] |
no_license
|
KarinAlbiez0910/random_walk
|
09dab4b9d0dc40c9f0b5973ffe6b1a3934e9957a
|
7bf1c17ea2b4af7b45e76bba619e6267459f2bdb
|
refs/heads/master
| 2023-02-11T02:13:46.334716
| 2021-01-03T11:56:00
| 2021-01-03T11:56:00
| 326,395,297
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,574
|
py
|
# Tk/X11 color names (as accepted by tkinter widgets and turtle graphics),
# including the numbered shade variants (e.g. 'snow2'..'snow4') and the
# 'gray1'..'gray99' ramp.
# NOTE(review): presumably used to pick random pen/fill colors for the
# random-walk drawing — confirm against the caller.
COLORS = ['snow', 'ghost white', 'white smoke', 'gainsboro', 'floral white', 'old lace',
    'linen', 'antique white', 'papaya whip', 'blanched almond', 'bisque', 'peach puff',
    'navajo white', 'lemon chiffon', 'mint cream', 'azure', 'alice blue', 'lavender',
    'lavender blush', 'misty rose', 'dark slate gray', 'dim gray', 'slate gray',
    'light slate gray', 'gray', 'light grey', 'midnight blue', 'navy', 'cornflower blue', 'dark slate blue',
    'slate blue', 'medium slate blue', 'light slate blue', 'medium blue', 'royal blue', 'blue',
    'dodger blue', 'deep sky blue', 'sky blue', 'light sky blue', 'steel blue', 'light steel blue',
    'light blue', 'powder blue', 'pale turquoise', 'dark turquoise', 'medium turquoise', 'turquoise',
    'cyan', 'light cyan', 'cadet blue', 'medium aquamarine', 'aquamarine', 'dark green', 'dark olive green',
    'dark sea green', 'sea green', 'medium sea green', 'light sea green', 'pale green', 'spring green',
    'lawn green', 'medium spring green', 'green yellow', 'lime green', 'yellow green',
    'forest green', 'olive drab', 'dark khaki', 'khaki', 'pale goldenrod', 'light goldenrod yellow',
    'light yellow', 'yellow', 'gold', 'light goldenrod', 'goldenrod', 'dark goldenrod', 'rosy brown',
    'indian red', 'saddle brown', 'sandy brown',
    'dark salmon', 'salmon', 'light salmon', 'orange', 'dark orange',
    'coral', 'light coral', 'tomato', 'orange red', 'red', 'hot pink', 'deep pink', 'pink', 'light pink',
    'pale violet red', 'maroon', 'medium violet red', 'violet red',
    'medium orchid', 'dark orchid', 'dark violet', 'blue violet', 'purple', 'medium purple',
    'thistle', 'snow2', 'snow3',
    'snow4', 'seashell2', 'seashell3', 'seashell4', 'AntiqueWhite1', 'AntiqueWhite2',
    'AntiqueWhite3', 'AntiqueWhite4', 'bisque2', 'bisque3', 'bisque4', 'PeachPuff2',
    'PeachPuff3', 'PeachPuff4', 'NavajoWhite2', 'NavajoWhite3', 'NavajoWhite4',
    'LemonChiffon2', 'LemonChiffon3', 'LemonChiffon4', 'cornsilk2', 'cornsilk3',
    'cornsilk4', 'ivory2', 'ivory3', 'ivory4', 'honeydew2', 'honeydew3', 'honeydew4',
    'LavenderBlush2', 'LavenderBlush3', 'LavenderBlush4', 'MistyRose2', 'MistyRose3',
    'MistyRose4', 'azure2', 'azure3', 'azure4', 'SlateBlue1', 'SlateBlue2', 'SlateBlue3',
    'SlateBlue4', 'RoyalBlue1', 'RoyalBlue2', 'RoyalBlue3', 'RoyalBlue4', 'blue2', 'blue4',
    'DodgerBlue2', 'DodgerBlue3', 'DodgerBlue4', 'SteelBlue1', 'SteelBlue2',
    'SteelBlue3', 'SteelBlue4', 'DeepSkyBlue2', 'DeepSkyBlue3', 'DeepSkyBlue4',
    'SkyBlue1', 'SkyBlue2', 'SkyBlue3', 'SkyBlue4', 'LightSkyBlue1', 'LightSkyBlue2',
    'LightSkyBlue3', 'LightSkyBlue4', 'SlateGray1', 'SlateGray2', 'SlateGray3',
    'SlateGray4', 'LightSteelBlue1', 'LightSteelBlue2', 'LightSteelBlue3',
    'LightSteelBlue4', 'LightBlue1', 'LightBlue2', 'LightBlue3', 'LightBlue4',
    'LightCyan2', 'LightCyan3', 'LightCyan4', 'PaleTurquoise1', 'PaleTurquoise2',
    'PaleTurquoise3', 'PaleTurquoise4', 'CadetBlue1', 'CadetBlue2', 'CadetBlue3',
    'CadetBlue4', 'turquoise1', 'turquoise2', 'turquoise3', 'turquoise4', 'cyan2', 'cyan3',
    'cyan4', 'DarkSlateGray1', 'DarkSlateGray2', 'DarkSlateGray3', 'DarkSlateGray4',
    'aquamarine2', 'aquamarine4', 'DarkSeaGreen1', 'DarkSeaGreen2', 'DarkSeaGreen3',
    'DarkSeaGreen4', 'SeaGreen1', 'SeaGreen2', 'SeaGreen3', 'PaleGreen1', 'PaleGreen2',
    'PaleGreen3', 'PaleGreen4', 'SpringGreen2', 'SpringGreen3', 'SpringGreen4',
    'green2', 'green3', 'green4', 'chartreuse2', 'chartreuse3', 'chartreuse4',
    'OliveDrab1', 'OliveDrab2', 'OliveDrab4', 'DarkOliveGreen1', 'DarkOliveGreen2',
    'DarkOliveGreen3', 'DarkOliveGreen4', 'khaki1', 'khaki2', 'khaki3', 'khaki4',
    'LightGoldenrod1', 'LightGoldenrod2', 'LightGoldenrod3', 'LightGoldenrod4',
    'LightYellow2', 'LightYellow3', 'LightYellow4', 'yellow2', 'yellow3', 'yellow4',
    'gold2', 'gold3', 'gold4', 'goldenrod1', 'goldenrod2', 'goldenrod3', 'goldenrod4',
    'DarkGoldenrod1', 'DarkGoldenrod2', 'DarkGoldenrod3', 'DarkGoldenrod4',
    'RosyBrown1', 'RosyBrown2', 'RosyBrown3', 'RosyBrown4', 'IndianRed1', 'IndianRed2',
    'IndianRed3', 'IndianRed4', 'sienna1', 'sienna2', 'sienna3', 'sienna4', 'burlywood1',
    'burlywood2', 'burlywood3', 'burlywood4', 'wheat1', 'wheat2', 'wheat3', 'wheat4', 'tan1',
    'tan2', 'tan4', 'chocolate1', 'chocolate2', 'chocolate3', 'firebrick1', 'firebrick2',
    'firebrick3', 'firebrick4', 'brown1', 'brown2', 'brown3', 'brown4', 'salmon1', 'salmon2',
    'salmon3', 'salmon4', 'LightSalmon2', 'LightSalmon3', 'LightSalmon4', 'orange2',
    'orange3', 'orange4', 'DarkOrange1', 'DarkOrange2', 'DarkOrange3', 'DarkOrange4',
    'coral1', 'coral2', 'coral3', 'coral4', 'tomato2', 'tomato3', 'tomato4', 'OrangeRed2',
    'OrangeRed3', 'OrangeRed4', 'red2', 'red3', 'red4', 'DeepPink2', 'DeepPink3', 'DeepPink4',
    'HotPink1', 'HotPink2', 'HotPink3', 'HotPink4', 'pink1', 'pink2', 'pink3', 'pink4',
    'LightPink1', 'LightPink2', 'LightPink3', 'LightPink4', 'PaleVioletRed1',
    'PaleVioletRed2', 'PaleVioletRed3', 'PaleVioletRed4', 'maroon1', 'maroon2',
    'maroon3', 'maroon4', 'VioletRed1', 'VioletRed2', 'VioletRed3', 'VioletRed4',
    'magenta2', 'magenta3', 'magenta4', 'orchid1', 'orchid2', 'orchid3', 'orchid4', 'plum1',
    'plum2', 'plum3', 'plum4', 'MediumOrchid1', 'MediumOrchid2', 'MediumOrchid3',
    'MediumOrchid4', 'DarkOrchid1', 'DarkOrchid2', 'DarkOrchid3', 'DarkOrchid4',
    'purple1', 'purple2', 'purple3', 'purple4', 'MediumPurple1', 'MediumPurple2',
    'MediumPurple3', 'MediumPurple4', 'thistle1', 'thistle2', 'thistle3', 'thistle4',
    'gray1', 'gray2', 'gray3', 'gray4', 'gray5', 'gray6', 'gray7', 'gray8', 'gray9', 'gray10',
    'gray11', 'gray12', 'gray13', 'gray14', 'gray15', 'gray16', 'gray17', 'gray18', 'gray19',
    'gray20', 'gray21', 'gray22', 'gray23', 'gray24', 'gray25', 'gray26', 'gray27', 'gray28',
    'gray29', 'gray30', 'gray31', 'gray32', 'gray33', 'gray34', 'gray35', 'gray36', 'gray37',
    'gray38', 'gray39', 'gray40', 'gray42', 'gray43', 'gray44', 'gray45', 'gray46', 'gray47',
    'gray48', 'gray49', 'gray50', 'gray51', 'gray52', 'gray53', 'gray54', 'gray55', 'gray56',
    'gray57', 'gray58', 'gray59', 'gray60', 'gray61', 'gray62', 'gray63', 'gray64', 'gray65',
    'gray66', 'gray67', 'gray68', 'gray69', 'gray70', 'gray71', 'gray72', 'gray73', 'gray74',
    'gray75', 'gray76', 'gray77', 'gray78', 'gray79', 'gray80', 'gray81', 'gray82', 'gray83',
    'gray84', 'gray85', 'gray86', 'gray87', 'gray88', 'gray89', 'gray90', 'gray91', 'gray92',
    'gray93', 'gray94', 'gray95', 'gray97', 'gray98', 'gray99']
|
[
"you@example.com"
] |
you@example.com
|
a2bcf183766269b33f0bf728228309192ba4d899
|
d5319f6369e96d2f027df9458fa664aac62f6523
|
/Modules/M-06-Computer Vision/D-08-Projects/white_board.py
|
e4b4d6db243626cd06980b2ef7aed8d0e92a079f
|
[] |
no_license
|
UdawalaHewageDilan/AI_Spring_2021
|
3466ad1f6c1889ab1360a3ef053e37a8be41c6ca
|
6a7b791d9456442f8156f2ca9eb72fc42037b739
|
refs/heads/main
| 2023-06-11T21:39:33.124818
| 2021-06-21T14:27:39
| 2021-06-21T14:27:39
| 348,318,714
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,329
|
py
|
import cv2 as cv
import mediapipe as mp
import numpy as np

mp_drawing = mp.solutions.drawing_utils
mp_hands = mp.solutions.hands

font = cv.FONT_HERSHEY_SIMPLEX
color = (0, 255, 0)

# HSV thresholds for red; hue wraps around 180, so two ranges are needed.
lower_red1 = np.array([0, 150, 50])
upper_red1 = np.array([10, 255, 255])
lower_red2 = np.array([170, 150, 50])
upper_red2 = np.array([180, 255, 255])

vc = cv.VideoCapture(0)
if vc.isOpened():
    response, frame = vc.read()
else:
    response = False
    frame = None

# Fixed: previously `frame.shape` was read even when the capture device
# failed to open (NameError on `frame`). Exit cleanly instead.
if not response or frame is None:
    vc.release()
    raise SystemExit("Could not read from the video capture device.")

image_hight, image_width, _ = frame.shape

with mp_hands.Hands(min_detection_confidence=0.8, min_tracking_confidence=0.5) as hands:
    while response:
        response, frame = vc.read()
        # Fixed: stop instead of processing a None frame when the stream ends.
        if not response:
            break
        HSV = cv.cvtColor(frame, cv.COLOR_BGR2HSV)
        mask_red0 = cv.inRange(HSV, lower_red1, upper_red1)
        mask_red1 = cv.inRange(HSV, lower_red2, upper_red2)
        mask = mask_red0 + mask_red1
        # MediaPipe expects RGB; flip horizontally for a selfie view.
        image = cv.cvtColor(cv.flip(frame, 1), cv.COLOR_BGR2RGB)
        image.flags.writeable = False
        results = hands.process(image)
        image.flags.writeable = True
        image = cv.cvtColor(image, cv.COLOR_RGB2BGR)
        if results.multi_hand_landmarks:
            for hand_landmarks in results.multi_hand_landmarks:
                # Index-finger and thumb tip positions in pixel coordinates.
                index_x = hand_landmarks.landmark[mp_hands.HandLandmark.INDEX_FINGER_TIP].x * image_width
                index_y = hand_landmarks.landmark[mp_hands.HandLandmark.INDEX_FINGER_TIP].y * image_hight
                thumb_x = hand_landmarks.landmark[mp_hands.HandLandmark.THUMB_TIP].x * image_width
                thumb_y = hand_landmarks.landmark[mp_hands.HandLandmark.THUMB_TIP].y * image_hight
                mp_drawing.draw_landmarks(image, hand_landmarks, mp_hands.HAND_CONNECTIONS)
                try:
                    # Index finger in the top-right corner: stop this hand loop.
                    if index_x >= 610 and index_y <= 27:
                        break
                    # Index finger in the top-left corner: grayscale view.
                    if index_x <= 50 and index_y <= 27:
                        image = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
                    # Pinch distance between thumb and index tips.
                    dist_thumb_index = np.linalg.norm(np.array((index_x, index_y)) - np.array((thumb_x, thumb_y)))
                    if dist_thumb_index <= 60:
                        # Shade non-red pixels proportionally to the pinch.
                        image[mask == 0] = dist_thumb_index * 2
                        print(dist_thumb_index)
                except Exception:
                    # NOTE(review): broad swallow kept to avoid crashing the
                    # video loop on malformed landmarks; consider logging.
                    continue
        cv.imshow('MediaPipe Hands', image)
        key = cv.waitKey(1)
        if key == 27:  # exit on ESC
            break
vc.release()
cv.destroyAllWindows()
|
[
"79456800+UdawalaHewageDilan@users.noreply.github.com"
] |
79456800+UdawalaHewageDilan@users.noreply.github.com
|
4b6c6f450f5c92a61a25483419a4634ef9be7550
|
02ca82306a047faabf180a3c3a964e3a0c50cc4f
|
/car_detect_svm.py
|
7a5e512d444b001493ef7774163a8936220bd27f
|
[] |
no_license
|
amemetov/sdc-term1-prj5-vehicle-detection
|
1a6f52886096e90f34048667be2d80d288de4164
|
3c316185f7914fe3c5c03741b366293f77a8aeeb
|
refs/heads/master
| 2021-01-23T04:39:03.591198
| 2017-06-05T22:01:28
| 2017-06-05T22:01:28
| 92,934,953
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,030
|
py
|
import numpy as np
import cv2
import math
import time
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.svm import LinearSVC
from features import convert_color, bin_spatial, color_hist, get_hog_features, extract_features
# Train linear SVC on passed images using passed params to build features
def train(vehicles_files, not_vehicles_files,
          cs, spatial_size, hist_bins,
          orient, pix_per_cell, cell_per_block, hog_channel,
          spatial_feat=True, hist_feat=True, hog_feat=True):
    """Fit a LinearSVC that separates car images from non-car images.

    Features (spatial colour bins, colour histograms, HOG) are extracted
    from both file lists with the supplied parameters, normalised with a
    per-column StandardScaler, and the classifier is evaluated on a 10%
    hold-out split.

    Returns:
        (svc, X_scaler): the fitted classifier and the fitted scaler.
    """
    t_start = time.time()
    car_features = extract_features(vehicles_files, cs, spatial_size, hist_bins,
                                    orient, pix_per_cell, cell_per_block, hog_channel,
                                    spatial_feat=spatial_feat, hist_feat=hist_feat, hog_feat=hog_feat,
                                    vis_hog=False)
    not_car_features = extract_features(not_vehicles_files, cs, spatial_size, hist_bins,
                                        orient, pix_per_cell, cell_per_block, hog_channel,
                                        spatial_feat=spatial_feat, hist_feat=hist_feat, hog_feat=hog_feat,
                                        vis_hog=False)
    print('Feature extraction spent time {0:.2f} seconds'.format(time.time() - t_start))
    # Stack positives and negatives into one design matrix.
    X = np.vstack((car_features, not_car_features)).astype(np.float64)
    # Fit a per-column scaler, then normalise the whole matrix with it.
    X_scaler = StandardScaler().fit(X)
    X_scaled = X_scaler.transform(X)
    # Labels: 1 for car, 0 for not-car.
    y = np.hstack((np.ones(len(car_features)), np.zeros(len(not_car_features))))
    # Random train/test split: 10% held out for the accuracy estimate.
    rand_state = np.random.randint(0, 100)
    X_train, X_test, y_train, y_test = train_test_split(X_scaled, y, test_size=0.1, random_state=rand_state)
    print('Feature vector length:', len(X_train[0]))
    t_fit = time.time()
    svc = LinearSVC()
    svc.fit(X_train, y_train)
    print('Training spent time {0:.2f} seconds'.format(time.time() - t_fit))
    # Report hold-out accuracy.
    test_accuracy = svc.score(X_test, y_test)
    print('Test Accuracy: {0:.3f}'.format(test_accuracy))
    return svc, X_scaler
def find_cars(img, wins, svc, X_scaler, cspace, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins,
              spatial_feat=True, hist_feat=True, hog_feat=True, patch_size=(64, 64)):
    """Classify each candidate window of *img* and return the detections.

    Args:
        img: image in the [0, 255] range.
        wins: candidate windows as ((x1, y1), (x2, y2)) tuples. Windows
            sharing the same y range ("level") should be consecutive so the
            per-level HOG is computed once and sub-sampled per window.
        svc: trained classifier exposing .predict().
        X_scaler: fitted scaler matching the training feature pipeline.
        Remaining parameters mirror the feature-extraction settings used
        by train().

    Returns:
        (draw_img, box_list): a copy of *img* with detections drawn, and
        the list of detected boxes in ((x1, y1), (x2, y2)) form.
    """
    h, w = img.shape[0], img.shape[1]
    draw_img = np.copy(img)
    # The classifier was trained on images scaled to [0, 1].
    img = img.astype(np.float32) / 255
    # convert to expected color space
    img_cvt = convert_color(img, cspace=cspace)
    # the result box list
    box_list = []
    # Per-level HOG cache: rebuilt only when the window's y range changes.
    last_y_start, last_y_stop = None, None
    hog1, hog2, hog3 = None, None, None
    for win in wins:
        win_features = []
        win_left_top, win_right_bottom = win
        (x_start, y_start) = win_left_top
        (x_stop, y_stop) = win_right_bottom
        # Clamp x coordinates to the image; skip degenerate windows.
        x_start = max(0, x_start)
        x_stop = min(w, x_stop)
        if (x_stop - x_start) <= 1 or (y_stop - y_start) <= 1:
            continue
        # Extract the image patch, resized to the classifier's input size.
        subimg = cv2.resize(img_cvt[y_start:y_stop, x_start:x_stop], patch_size)
        # Get color features
        if spatial_feat:
            spatial_features = bin_spatial(subimg, size=spatial_size)
            win_features.append(spatial_features)
        if hist_feat:
            hist_features = color_hist(subimg, nbins=hist_bins)
            win_features.append(hist_features)
        # BUGFIX: the original condition was `hog_feat and a or b`, which due
        # to operator precedence rebuilt the per-level HOG even when
        # hog_feat was False. Parenthesise the level-change test.
        if hog_feat and (last_y_start != y_start or last_y_stop != y_stop):
            last_y_start, last_y_stop = y_start, y_stop
            # New level detected - compute channel HOG for the whole stride.
            img_stride = img_cvt[y_start:y_stop, :]
            img_stride_h = patch_size[0]
            scale = (y_stop - y_start) / img_stride_h
            # BUGFIX: np.int was removed in NumPy 1.24; use the builtin int.
            img_stride_w = int(w / scale)
            img_stride = cv2.resize(img_stride, (img_stride_w, img_stride_h))
            ch1 = img_stride[:, :, 0]
            ch2 = img_stride[:, :, 1]
            ch3 = img_stride[:, :, 2]
            hog1 = get_hog_features(ch1, orient, pix_per_cell, cell_per_block, feature_vec=False)
            hog2 = get_hog_features(ch2, orient, pix_per_cell, cell_per_block, feature_vec=False)
            hog3 = get_hog_features(ch3, orient, pix_per_cell, cell_per_block, feature_vec=False)
        if hog_feat:
            # Sub-sample the per-level HOG for this window's x range.
            x_hog_start = int(math.floor((hog1.shape[1] / w) * x_start))
            x_hog_step = hog1.shape[0]
            x_hog_stop = x_hog_start + x_hog_step
            if x_hog_stop > hog1.shape[1]:
                # Clamp to the right edge while keeping the width constant.
                x_hog_stop = hog1.shape[1]
                x_hog_start = x_hog_stop - x_hog_step
            hog_feat1 = hog1[:, x_hog_start:x_hog_stop].ravel()
            hog_feat2 = hog2[:, x_hog_start:x_hog_stop].ravel()
            hog_feat3 = hog3[:, x_hog_start:x_hog_stop].ravel()
            hog_features = np.hstack((hog_feat1, hog_feat2, hog_feat3))
            win_features.append(hog_features)
        # Scale features and make a prediction
        test_features = X_scaler.transform(np.concatenate(win_features).reshape(1, -1))
        test_prediction = svc.predict(test_features)
        if test_prediction == 1:
            # draw a box
            cv2.rectangle(draw_img, win_left_top, win_right_bottom, (0, 0, 255), 6)
            # add found box to list
            box_list.append((win_left_top, win_right_bottom))
    return draw_img, box_list
# # Define a single function that can extract features using hog sub-sampling and make predictions
# def find_cars2(img, ystart, ystop, scale, svc, X_scaler, cspace, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins):
# draw_img = np.copy(img)
# img = img.astype(np.float32) / 255
#
# img_tosearch = img[ystart:ystop, :, :]
# ctrans_tosearch = convert_color(img_tosearch, conv=cspace)
# if scale != 1:
# imshape = ctrans_tosearch.shape
# ctrans_tosearch = cv2.resize(ctrans_tosearch, (np.int(imshape[1] / scale), np.int(imshape[0] / scale)))
#
# ch1 = ctrans_tosearch[:, :, 0]
# ch2 = ctrans_tosearch[:, :, 1]
# ch3 = ctrans_tosearch[:, :, 2]
#
# # Define blocks and steps as above
# nxblocks = (ch1.shape[1] // pix_per_cell) - cell_per_block + 1
# nyblocks = (ch1.shape[0] // pix_per_cell) - cell_per_block + 1
# nfeat_per_block = orient * cell_per_block ** 2
#
# # 64 was the orginal sampling rate, with 8 cells and 8 pix per cell
# window = 64
# nblocks_per_window = (window // pix_per_cell) - cell_per_block + 1
# cells_per_step = 2 # Instead of overlap, define how many cells to step
# nxsteps = (nxblocks - nblocks_per_window) // cells_per_step
# nysteps = (nyblocks - nblocks_per_window) // cells_per_step
#
# # Compute individual channel HOG features for the entire image
# hog1 = get_hog_features(ch1, orient, pix_per_cell, cell_per_block, feature_vec=False)
# hog2 = get_hog_features(ch2, orient, pix_per_cell, cell_per_block, feature_vec=False)
# hog3 = get_hog_features(ch3, orient, pix_per_cell, cell_per_block, feature_vec=False)
#
# # "box" takes the form ((x1, y1), (x2, y2))
# box_list = []
#
# print('Blocks: {0}'.format(nxsteps*nysteps))
#
# for xb in range(nxsteps):
# for yb in range(nysteps):
# ypos = yb * cells_per_step
# xpos = xb * cells_per_step
# # Extract HOG for this patch
# hog_feat1 = hog1[ypos:ypos + nblocks_per_window, xpos:xpos + nblocks_per_window].ravel()
# hog_feat2 = hog2[ypos:ypos + nblocks_per_window, xpos:xpos + nblocks_per_window].ravel()
# hog_feat3 = hog3[ypos:ypos + nblocks_per_window, xpos:xpos + nblocks_per_window].ravel()
# hog_features = np.hstack((hog_feat1, hog_feat2, hog_feat3))
#
# xleft = xpos * pix_per_cell
# ytop = ypos * pix_per_cell
#
# # Extract the image patch
# subimg = cv2.resize(ctrans_tosearch[ytop:ytop + window, xleft:xleft + window], (64, 64))
#
# # Get color features
# spatial_features = bin_spatial(subimg, size=spatial_size)
# hist_features = color_hist(subimg, nbins=hist_bins)
#
# # Scale features and make a prediction
# test_features = X_scaler.transform(np.hstack((spatial_features, hist_features, hog_features)).reshape(1, -1))
# test_prediction = svc.predict(test_features)
#
# # search box
# xbox_left = np.int(xleft * scale)
# ytop_draw = np.int(ytop * scale)
# win_draw = np.int(window * scale)
#
# box_left_top = (xbox_left, ytop_draw + ystart)
# box_right_bottom = (xbox_left + win_draw, ytop_draw + win_draw + ystart)
#
# if test_prediction == 1:
# # draw a box
# cv2.rectangle(draw_img, box_left_top, box_right_bottom, (0, 0, 255), 6)
#
# # add found box to list
# box_list.append((box_left_top, box_right_bottom))
# else:
# #print('Box {0}'.format((box_left_top, box_right_bottom)))
# #cv2.rectangle(draw_img, box_left_top, box_right_bottom, (255, 0, 0), 6)
# continue
#
# return draw_img, box_list
#
#
|
[
"arsen.memetov@aspose.com"
] |
arsen.memetov@aspose.com
|
d48b14029b7f955c2ea6b467745c3eb00d552755
|
0407432cac984ddc51bfc97c93444d807860859a
|
/链家存mongo.py
|
e4a9d3140c61b24db8fecbd711f9255e747863e4
|
[] |
no_license
|
Cobra9527/Spider
|
f325152d7a4f41669a7dd6e678166c9e4cc7bcf2
|
03d29a23ed1114f93e59b3ec5ef724b2b6ac0afe
|
refs/heads/master
| 2020-04-08T07:37:05.977638
| 2018-12-04T02:04:57
| 2018-12-04T02:04:57
| 159,145,195
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,667
|
py
|
import requests
import time
import re
import pymongo
class Lianjia(object):
    """Scrape second-hand housing listings from qd.lianjia.com into MongoDB."""

    # Listing pattern; groups: (name, info, total price, unit price).
    # Compiled once at class level instead of on every page parse.
    HOUSE_RE = re.compile(
        r'<div class="houseInfo".*?data-el="region">(.*?)</a>(.*?)</div>.*?<div class="totalPrice"><span>(.*?)</span>.*?<span>(.*?)</span>',
        re.S)

    def __init__(self):
        self.url = 'https://qd.lianjia.com/ershoufang/'
        self.headers = {"User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0) Gecko/20100101 Firefox/6.0"}
        self.number = 1  # page counter appended to the base URL
        self.conn = pymongo.MongoClient('localhost', 27017)
        self.db = self.conn.lianjia
        self.myset = self.db.houseinfo

    def getpage(self):
        """Fetch the current page and hand its HTML to the parser."""
        url = self.url + str(self.number)
        respond = requests.get(url, headers=self.headers)
        respond.encoding = 'utf-8'
        html = respond.text
        self.readpage(html)

    def readpage(self, html):
        """Extract listing tuples from *html* and store them in MongoDB."""
        li = self.HOUSE_RE.findall(html)
        self.writemongo(li)

    def writemongo(self, li):
        """Insert one document per listing tuple into the collection."""
        for house in li:
            dic = {
                'name': house[0].strip(),
                'info': house[1].strip(),
                # totals are listed in units of 10,000 CNY
                'totalprice': float(house[2].strip()) * 10000,
                'price': house[3].strip()
            }
            # BUGFIX: Collection.insert() was deprecated in PyMongo 3 and
            # removed in PyMongo 4; insert_one() is the supported API.
            self.myset.insert_one(dic)
            print('存入数据库成功')

    def workon(self):
        """Interactive driver: fetch pages until the user answers 'n'."""
        self.getpage()
        self.number += 1
        while True:
            go = input('爬取成功,是否继续(y/n)')
            if go == 'n':
                print('程序结束,感谢使用')
                break
            else:
                self.getpage()
                self.number += 1
                time.sleep(1)
|
[
"wjdsam@126.com"
] |
wjdsam@126.com
|
75a4ccc312da8ef1bffb948875732fd79cd01f8e
|
267b25ef5145d36100e1d7822e2ae49ad710cc11
|
/context/prepare.py
|
4243c0dfba28c9b583d08a56f9cbabe612b5005f
|
[] |
no_license
|
thesfinox/ml-dev
|
684c8bfd493cf68ead351514ff0cb7ca9311f367
|
8829759c1b6e8729a93e2485ea769c6a9c07cbd1
|
refs/heads/master
| 2023-02-26T11:57:32.050797
| 2021-02-03T10:07:49
| 2021-02-03T10:07:49
| 334,967,653
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,024
|
py
|
import os
import sys
import logging
import seaborn as sns
import matplotlib.pyplot as plt
import tensorflow as tf
from datetime import datetime
class Context:
    '''
    Define the environment in the notebook/script.

    Sets the seaborn/matplotlib theme, optionally configures GPU memory
    growth for Tensorflow, and creates the working directories (images,
    models, data, logs, tensorboard), optionally with a per-run
    timestamped subdirectory.
    '''
    def __init__(self,
                 context='notebook',
                 style='darkgrid',
                 palette='tab10',
                 color_codes=True,
                 gpu_ready=False,
                 wd='.',
                 img_dir=None,
                 mod_dir=None,
                 dat_dir=None,
                 log_dir=None,
                 tensorboard=None,
                 subdir=True,
                 session='context'
                 ):
        '''
        Arguments:
            context: Matplotlib context,
            style: Seaborn style,
            palette: Seaborn palette name,
            color_codes: translate "r", "b", "g", etc. to the palette colours,
            gpu_ready: setup GPU for Tensorflow,
            wd: working directory,
            img_dir: create directory for images (if not None),
            mod_dir: create directory for models (if not None),
            dat_dir: create directory for data (if not None),
            log_dir: create directory for logs (if not None),
            tensorboard: create directory for tensorboard (if not None),
            subdir: for each directory, create a subdir to identify the run,
            session: name of the logging session.
        '''
        self.__context = context
        self.__style = style
        self.__palette = palette
        self.__color_codes = color_codes
        self.__gpu_ready = gpu_ready
        self.__wd = wd
        self.__img_dir = img_dir
        self.__mod_dir = mod_dir
        self.__dat_dir = dat_dir
        self.__log_dir = log_dir
        self.__tensorboard = tensorboard
        self.__subdir = subdir
        self.__sub_name = None
        self.__session = session
        self.__logger = None
        # add subdirectory named after the session and the current time
        if self.__subdir:
            self.__sub_name = datetime.now().strftime(self.__session + '_%H%M%S_%Y%m%d')
        # make every configured path relative to the working directory
        if self.__img_dir is not None:
            self.__img_dir = self.__check_path(self.__img_dir, self.__wd)
        if self.__mod_dir is not None:
            self.__mod_dir = self.__check_path(self.__mod_dir, self.__wd)
        if self.__dat_dir is not None:
            self.__dat_dir = self.__check_path(self.__dat_dir, self.__wd)
        if self.__log_dir is not None:
            self.__log_dir = self.__check_path(self.__log_dir, self.__wd)
        if self.__tensorboard is not None:
            self.__tensorboard = self.__check_path(self.__tensorboard, self.__wd)
        # set the plotting context
        self.__set_context(self.__context, self.__style, self.__palette, self.__color_codes)
        # create working directories (data and logs have no per-run subdir)
        if self.__img_dir is not None:
            self.__create_dir(self.__img_dir, sub_name=self.__sub_name)
        if self.__mod_dir is not None:
            self.__create_dir(self.__mod_dir, sub_name=self.__sub_name)
        if self.__dat_dir is not None:
            self.__create_dir(self.__dat_dir)
        if self.__log_dir is not None:
            self.__create_dir(self.__log_dir)
            self.__logging(self.__log_dir, self.__session)
        if self.__tensorboard is not None:
            self.__create_dir(self.__tensorboard, sub_name=self.__sub_name)
        # enable GPU memory growth so Tensorflow does not grab all memory
        if self.__gpu_ready:
            gpus = tf.config.experimental.list_physical_devices('GPU')
            if gpus:
                try:
                    for gpu in gpus:
                        tf.config.experimental.set_memory_growth(gpu, True)
                    print('GPU is ready!')
                except RuntimeError as e:
                    # BUGFIX: file.write() requires a string; passing the
                    # exception object raised TypeError and hid the error.
                    sys.stderr.write(str(e))
    def __check_path(self, directory, wd):
        '''
        Check if the path is in the working directory and prefix it if not.

        Arguments:
            directory: the directory to check,
            wd: the working directory.
        '''
        p = directory.split(os.path.sep)
        if p[0] != wd:
            directory = os.path.join(wd, directory)
        return directory
    def __set_context(self, context, style, palette, color_codes):
        '''
        Set the plotting context.

        Arguments:
            context: Matplotlib context,
            style: Seaborn style,
            palette: Seaborn palette name,
            color_codes: translate "r", "b", "g", etc. to the palette colours,
        '''
        sns.set_theme(context=context, style=style, palette=palette, color_codes=color_codes)
        return self
    def __create_dir(self, directory, sub_name=None):
        '''
        Create a directory (and an optional run subdirectory) if missing.

        Arguments:
            directory: name of the directory,
            sub_name: name of the subdirectory.
        '''
        if sub_name is not None:
            directory = os.path.join(directory, sub_name)
        os.makedirs(directory, exist_ok=True)
        return self
    def __logging(self, log_dir, session):
        '''
        Setup file logging for the session.

        Arguments:
            log_dir: log directory,
            session: name of the logging session.
        '''
        # set logger
        self.__logger = logging.getLogger(session)
        self.__logger.setLevel(logging.DEBUG)
        # file handler named after the timestamp and the session
        n = os.path.join(log_dir, datetime.now().strftime("%Y%m%d_%H%M%S") + '_' + session + '.log')
        f = logging.FileHandler(n)
        f.setLevel(logging.DEBUG)
        # formatter
        form = logging.Formatter('%(asctime)s | %(levelname)s: %(message)s')
        f.setFormatter(form)
        # add handler
        self.__logger.addHandler(f)
        # signal creation of the log
        self.__logger.info('Log file created.')
        return self
    def pwd(self):
        '''
        Return the working directory.
        '''
        return self.__wd
    def subdir(self):
        '''
        Return the name of the per-run subdirectory (None if disabled).
        '''
        return self.__sub_name
    def img(self):
        '''
        Return the image directory (including the run subdir if enabled).
        '''
        if self.__subdir:
            return os.path.join(self.__img_dir, self.__sub_name)
        else:
            return self.__img_dir
    def mod(self):
        '''
        Return the model directory (including the run subdir if enabled).
        '''
        if self.__subdir:
            return os.path.join(self.__mod_dir, self.__sub_name)
        else:
            return self.__mod_dir
    def dat(self):
        '''
        Return the data directory.
        '''
        return self.__dat_dir
    def log(self):
        '''
        Return the log directory.
        '''
        return self.__log_dir
    def tboard(self):
        '''
        Return the tensorboard directory (including the run subdir if enabled).
        '''
        if self.__subdir:
            return os.path.join(self.__tensorboard, self.__sub_name)
        else:
            return self.__tensorboard
    def log_sess(self):
        '''
        Return the name of the logging session.
        '''
        return self.__session
    def logger(self):
        '''
        Return the logger object.
        '''
        return self.__logger
|
[
"riccardo.finotello@gmail.com"
] |
riccardo.finotello@gmail.com
|
0a634a456b386322e87b726d273d719214617a6b
|
9be252fc36610589ae1bf607b0db41a9a8aaa89e
|
/space_invaders.py
|
906430ff06bd05eb8be5cec745c6a7b2dc2fdaaf
|
[] |
no_license
|
eugenNinca/space_invaders
|
4eeb5a4db35275f6eacd382731108aecf36b18a2
|
1bd24f50acff99319d16fde5da7d178aacd73739
|
refs/heads/master
| 2022-06-24T02:33:35.810998
| 2020-05-06T13:56:24
| 2020-05-06T13:56:24
| 261,695,587
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,884
|
py
|
import pygame
import sys
import time
import os
import random
# it must be done otherwise it does not know
# (the font module must be initialised before any SysFont call below)
pygame.font.init()
# SET THE WINDOW OF THE GAME
WIDTH, HEIGHT = 750, 750
WINDOW = pygame.display.set_mode((WIDTH, HEIGHT))
pygame.display.set_caption("Space Invaders 0.1")
# load ships images (enemy sprites)
RED_SPACE_SHIP = pygame.image.load(os.path.join("assets", "pixel_ship_red_small.png"))
BLUE_SPACE_SHIP = pygame.image.load(os.path.join("assets", "pixel_ship_blue_small.png"))
GREEN_SPACE_SHIP = pygame.image.load(os.path.join("assets", "pixel_ship_green_small.png"))
# player ship
YELLOW_SPACE_SHIP = pygame.image.load(os.path.join("assets", "pixel_ship_yellow (copy).jpeg"))
# load guns and background
BACKGROUND = pygame.image.load("assets/background-black.png")
# scale the background so it fills the window exactly
BG = pygame.transform.scale(BACKGROUND, (WIDTH, HEIGHT))
LASER_BLUE = pygame.image.load(os.path.join("assets", "pixel_laser_blue.png"))
LASER_GREEN = pygame.image.load(os.path.join("assets", "pixel_laser_green.png"))
LASER_RED = pygame.image.load(os.path.join("assets", "pixel_laser_red.png"))
LASER_YELLOW = pygame.image.load(os.path.join("assets", "pixel_laser_yellow.png"))
class Laser:
    """A single projectile travelling vertically across the screen."""

    def __init__(self, x, y, img):
        self.x = x
        self.y = y
        self.img = img
        # Pixel mask for pixel-perfect collision tests.
        self.mask = pygame.mask.from_surface(self.img)

    def draw(self, window):
        """Blit the laser sprite at its current position."""
        window.blit(self.img, (self.x, self.y))

    def move(self, velocity):
        """Advance vertically; positive velocity moves down the screen."""
        self.y = self.y + velocity

    def off_screen(self, height):
        """Return True once the laser has left the 0..height vertical band."""
        return self.y < 0 or self.y > height

    def collision(self, obj):
        """Pixel-perfect collision test against another sprite."""
        return collide(self, obj)
# abstract class
class Ship:
    """Abstract base class for all ships (player and enemies).

    Subclasses assign ship_img/laser_img and a collision mask; this class
    handles laser bookkeeping, drawing and the shot cooldown.
    """
    # Number of frames a ship must wait between shots.
    # NOTE(review): the original comment said "1/6 sec", but the game loop
    # runs at FPS = 30, making this 1/3 s -- confirm the intended rate.
    COOLDOWN = 10

    def __init__(self, x, y, health=100):
        self.x = x
        self.y = y
        self.health = health
        self.ship_img = None   # set by subclasses
        self.laser_img = None  # set by subclasses
        self.lasers = []
        self.cool_down_counter = 0

    def draw(self, window):
        """Draw the ship sprite and all of its active lasers."""
        window.blit(self.ship_img, (self.x, self.y))
        for laser in self.lasers:
            laser.draw(window)

    def move_lasers(self, vel, obj):
        """Advance lasers, cull off-screen ones and damage *obj* on hit.

        BUGFIX: iterate over a copy of the list -- the original removed
        elements from the list it was iterating, which skips the element
        that follows each removal.
        """
        self.cooldown()
        for laser in list(self.lasers):
            laser.move(vel)
            if laser.off_screen(HEIGHT):
                self.lasers.remove(laser)
            elif laser.collision(obj):
                obj.health -= 10
                self.lasers.remove(laser)

    def get_width(self):
        """Width of the ship sprite in pixels."""
        return self.ship_img.get_width()

    def get_height(self):
        """Height of the ship sprite in pixels."""
        return self.ship_img.get_height()

    def cooldown(self):
        """Advance the shot cooldown; reset once COOLDOWN frames elapse."""
        if self.cool_down_counter >= self.COOLDOWN:
            self.cool_down_counter = 0
        elif self.cool_down_counter > 0:
            self.cool_down_counter += 1

    def shoot(self):
        """Fire a laser from the ship's position unless cooling down."""
        if self.cool_down_counter == 0:
            laser = Laser(self.x, self.y, self.laser_img)
            self.lasers.append(laser)
            self.cool_down_counter = 1
class Player(Ship):
    """The user-controlled ship; tracks max health to draw a health bar."""

    def __init__(self, x, y, health=100):
        super().__init__(x, y, health)
        self.ship_img = YELLOW_SPACE_SHIP
        self.laser_img = LASER_YELLOW
        self.mask = pygame.mask.from_surface(self.ship_img)
        self.max_health = health

    def draw(self, window):
        """Draw the ship and lasers, then overlay the health bar."""
        super().draw(window)
        self.health_bar(window)

    def move_lasers(self, vel, objs):
        """Advance this ship's lasers; destroy any enemy in *objs* hit.

        BUGFIX: iterate over copies of both lists. The original removed
        elements from the lists it was iterating, which skips entries and
        raises ValueError when one laser overlaps several enemies (the
        laser is removed twice).
        """
        self.cooldown()
        for laser in list(self.lasers):
            laser.move(vel)
            if laser.off_screen(HEIGHT):
                self.lasers.remove(laser)
            else:
                for obj in list(objs):
                    if laser.collision(obj):
                        objs.remove(obj)
                        if laser in self.lasers:
                            self.lasers.remove(laser)

    def health_bar(self, window):
        """Red (damage) bar overlaid by a green bar proportional to the
        remaining health, drawn just below the ship sprite."""
        pygame.draw.rect(window, (255, 0, 0),
                         (self.x, self.y + self.ship_img.get_height() + 10, self.ship_img.get_width(), 10))
        pygame.draw.rect(window, (0, 255, 0),
                         (self.x, self.y + self.ship_img.get_height() + 10,
                          health_percentage(self.health, self.max_health, self.ship_img.get_width()),
                          10))
class Enemy(Ship):
    """Computer-controlled ship that descends from the top of the screen."""

    # Maps a colour name to the (ship sprite, laser sprite) pair.
    COLOR_MAP = {
        "red": (RED_SPACE_SHIP, LASER_RED),
        "blue": (BLUE_SPACE_SHIP, LASER_BLUE),
        "green": (GREEN_SPACE_SHIP, LASER_GREEN)
    }

    def __init__(self, x, y, color, health=100):
        super().__init__(x, y, health)
        self.ship_img, self.laser_img = self.COLOR_MAP[color]
        self.mask = pygame.mask.from_surface(self.ship_img)

    def move(self, velocity):
        """Descend by *velocity* pixels."""
        self.y = self.y + velocity

    def shoot(self):
        """Fire a laser, slightly left-shifted relative to the sprite."""
        if self.cool_down_counter == 0:
            self.lasers.append(Laser(self.x - 10, self.y, self.laser_img))
            self.cool_down_counter = 1
# check if 2 objects collide with pygame's overlap on the mask
def collide(obj1, obj2):
    """Return True when the pixel masks of the two sprites overlap.

    pygame's Mask.overlap takes the offset of the second mask relative
    to the first, truncated to whole pixels.
    """
    offset = (int(obj2.x - obj1.x), int(obj2.y - obj1.y))
    return obj1.mask.overlap(obj2.mask, offset) is not None
def health_percentage(health, max_health, width):
    """Scale *health* to a bar of *width* pixels; never return a negative."""
    scaled = width * health / max_health
    return scaled if scaled > 0 else 0
def play():
    """Run one game session: spawn waves of enemies, handle WASD/space
    input, and render frames until the window receives a QUIT event."""
    FPS = 30
    clock = pygame.time.Clock()
    run = True
    lost = False
    level = 0
    lives = 1
    player_velocity = 5
    laser_velocity = 6
    main_font = pygame.font.SysFont("comicsans", 50)
    lost_font = pygame.font.SysFont("comicsans", 100)
    # Create player at down middle of the screen
    player = Player(WIDTH/2, HEIGHT - 130)
    enemies = []
    wave_length = 5
    enemy_velocity = 1
    def redraw_window():
        # Render one frame: background, HUD, player, enemies, lose banner.
        WINDOW.blit(BG, (0, 0))
        # draw text Lives and Levels
        lives_label = main_font.render(f" Lives: {lives}", 1, (255, 255, 255))
        level_label = main_font.render(f" Level: {level}", 1, (255, 255, 255))
        WINDOW.blit(lives_label, (10, 10))
        WINDOW.blit(level_label, (WIDTH - level_label.get_width() - 10, 10))
        # draw player
        player.draw(WINDOW)
        # draw enemies
        for enemy in enemies:
            enemy.draw(WINDOW)
        if lost:
            # show you loser screen on top of screen
            lost_label = lost_font.render("You lost!!!", 1, (255, 255, 255))
            WINDOW.blit(lost_label, (WIDTH / 2 - lost_label.get_width() / 2, 300))
            # TODO: stop the game/ go to start a new game
        pygame.display.update()
    # Main loop
    while run:
        clock.tick(FPS)
        if lives <= 0 or player.health <= 0:
            lost = True
        # Wave cleared: bump the level and spawn a bigger wave above the
        # visible area (negative y) so enemies drift into view gradually.
        if len(enemies) == 0:
            level += 1
            wave_length += 5
            for i in range(wave_length):
                enemy = Enemy(random.randrange(50, WIDTH - 50), random.randrange(-1000, -100),
                              random.choice(["red", "blue", "green"]))
                enemies.append(enemy)
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                run = False
                # pygame.quit()
        # get keys pressed
        keys = pygame.key.get_pressed()
        # Actions on key pressed (WASD movement clamped to the window)
        if keys[pygame.K_a] and player.x - player_velocity > 0: # left
            player.x -= player_velocity
        if keys[pygame.K_d] and player.x + player_velocity + player.get_width() < WIDTH: # right
            player.x += player_velocity
        if keys[pygame.K_w] and player.y - player_velocity > 0: # up
            player.y -= player_velocity
        if keys[pygame.K_s] and player.y + player_velocity + player.get_height() < HEIGHT: # down
            player.y += player_velocity
        if keys[pygame.K_SPACE]: # shoot laser
            player.shoot()
        # a pause action
        # Iterate a copy (enemies[:]) because enemies may be removed below.
        for enemy in enemies[:]:
            enemy.move(enemy_velocity)
            enemy.move_lasers(laser_velocity, player)
            # random enemy shooting
            # NOTE(review): 1-in-180 chance per frame; at FPS = 30 that is
            # one shot per ~6 s per enemy on average -- confirm intent.
            if random.randrange(0, 3 * 60) == 1:
                enemy.shoot()
            if collide(enemy, player):
                player.health -= 10
                enemies.remove(enemy)
            elif enemy.y + enemy.get_height() > HEIGHT:
                # Enemy escaped off the bottom: costs the player one life.
                enemies.remove(enemy)
                lives -= 1
        # Player lasers travel upwards, hence the negative velocity.
        player.move_lasers(-laser_velocity, enemies)
        redraw_window()
def main_menu():
    """Show the title screen; a mouse click starts a game, QUIT exits."""
    menu_font = pygame.font.SysFont("comicssans", 70)
    running = True
    while running:
        # Redraw the background and the centred prompt every frame.
        WINDOW.blit(BG, (0, 0))
        prompt = menu_font.render("Press the mouse to start...", 1, (255, 255, 255))
        WINDOW.blit(prompt, (WIDTH/2 - prompt.get_width()/2, 350))
        pygame.display.update()
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
            if event.type == pygame.MOUSEBUTTONDOWN:
                play()
# NOTE(review): this pygame.quit() executes at import time, *before*
# main_menu() runs below -- it looks misplaced (probably intended after
# the menu loop ends); confirm intent.
pygame.quit()
if __name__ == "__main__":
    # main execution
    main_menu()
|
[
"nincaeugen@gmail.com"
] |
nincaeugen@gmail.com
|
3fc70fa68aaa5167b85b70dd2605761696f07f98
|
68699395b1b110dc995b8da9aeca7018589e7876
|
/tests/test_importer.py
|
8184b60d548eb7e3841c3235b379a72c91f1afeb
|
[
"Apache-2.0"
] |
permissive
|
aaront/woidb
|
079e2850354202a16e58b982e8d91f52a7bc925e
|
65fdb85d66acf2a14e66f0225cc420ac5c39c862
|
refs/heads/master
| 2016-08-12T08:38:59.836897
| 2016-01-19T04:54:37
| 2016-01-19T04:54:37
| 49,221,077
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 673
|
py
|
import os
import unittest
from woidb.db import connect, Session
from woidb.importers.woi import import_csv
from woidb.models import Base, Team
class TestImporter(unittest.TestCase):
    """Exercise the WOI CSV importer against an in-memory SQLite database."""

    def setUp(self):
        # Fresh in-memory database and schema for every test case.
        self.engine = connect('sqlite://')
        Session.configure(bind=self.engine)
        self.session = Session()
        Base.metadata.create_all(self.engine)

    def tearDown(self):
        # Drop the schema so state never leaks between tests.
        Base.metadata.drop_all(self.engine)

    def test_team(self):
        # Importing the fixture CSV should yield exactly 32 Team rows.
        fixture = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data', 'teamcolors.csv')
        import_csv(fixture, self.session)
        teams = self.session.query(Team).all()
        self.assertEqual(32, len(teams))
|
[
"atoth89@gmail.com"
] |
atoth89@gmail.com
|
df6028f2d62bdaba03bf389a4967ad94ee23caff
|
dcb723b850a1cf7f3b0ecced3c1f5abfd7ea3501
|
/LogisticRegression_Model.py
|
fad85b466695a58fea913a8b6af485038188bc62
|
[] |
no_license
|
Sala7efelninja/Mobile-App-prediction-ML
|
543c3c90071232ec6f0258e845183a317dc037f7
|
d8d34ee8ecd82c1f8214ee5076d6a5a084373d8d
|
refs/heads/master
| 2020-11-25T11:50:08.378992
| 2019-12-20T15:17:01
| 2019-12-20T15:17:01
| 228,643,771
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,591
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 19 17:22:05 2019
@author: Asalla

Train a one-vs-rest logistic-regression classifier that predicts a mobile
app's rating class (Low/Intermediate/High), plot its confusion matrix,
and persist the fitted model with pickle.
"""
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn import linear_model
from sklearn import metrics
import matplotlib.pyplot as plt
import seaborn as sns
import pickle
import pre_processing as pp
import time
# ---- Load and clean the dataset -------------------------------------------
data = pd.read_csv('Mobile_App_Success_Milestone_2.csv')
# keep only rows with at least 5 non-null values and a non-null label
data.dropna(axis=0,how='any',thresh=5,inplace=True)
data = data[pd.notnull(data['App_Rating'])]
data=pp.remove_symbols(data)
data=pp.Encode_AppRating(data)
# numeric columns: coerce to float, impute, then drop remaining noise
columns_to_be_validated=['Price','Size','Reviews','Installs']
data=pp.to_float(data,columns_to_be_validated)
data=pp.replace_nan_values(data,columns_to_be_validated)
data.dropna(axis=0,how='any',inplace=True)
data=pp.delete_noise_data(data,columns_to_be_validated)
columns_to_be_transfomered = ['Category', 'Minimum Version', 'Content Rating']
data2=pp.label_encoder_trans(data,columns_to_be_transfomered)
#plt.subplots(figsize=(10, 8))
#corr =data2.corr()
#sns.heatmap(corr, xticklabels=corr.columns, yticklabels=corr.columns,annot=True)
#plt.show()
# ---- Feature engineering ---------------------------------------------------
X = data.iloc[:,[1,2,3,4,5,6,8]]
Y = data['App_Rating']
columns_to_be_scaled = ['Installs', 'Reviews']
# one-hot encode the categorical columns and scale the heavy-tailed counts
encodedFeatures =pp.one_hot_trans(X,columns_to_be_transfomered)
scaled_columns=pp.feature_scaling(X,columns_to_be_scaled)
X.drop(columns=columns_to_be_transfomered, inplace=True)
X.drop(columns=columns_to_be_scaled, inplace=True)
X = np.array(X)
features = np.concatenate((X, encodedFeatures,scaled_columns), axis=1)
features = pd.DataFrame(features)
X_train, X_test, y_train, y_test = train_test_split(features, Y, test_size=0.20, shuffle=True)
# NOTE(review): the PCA projection is computed but never used below -- the
# classifier is fitted on the un-projected features; confirm intent.
x_trainPca,x_testPca,ratio=pp.apply_PCA(X_train,X_test,2)
# ---- Train and evaluate ----------------------------------------------------
classifier = linear_model.LogisticRegression(multi_class='ovr',C=1000)
t0=time.time()
classifier.fit(X_train,y_train)
print ("training time:", round(time.time()-t0, 3), "s")
t1=time.time()
y_pred = classifier.predict(X_test)
print ("predict time:", round(time.time()-t1, 3), "s")
# test-set accuracy, reported as a percentage
accuracy = np.mean(y_pred == y_test)
print(accuracy * 100)
from sklearn.metrics import confusion_matrix
labels=['Low','Intermediate','High']
classes=[1,2,3]
cm=confusion_matrix(y_test,y_pred,labels=classes)
ax= plt.subplot()
sns.heatmap(cm, annot=True, ax = ax,linewidths=1,fmt = 'd');
# labels, title and ticks
ax.set_xlabel('Predicted labels');ax.set_ylabel('True labels');
ax.set_title('Confusion Matrix');
ax.xaxis.set_ticklabels(labels); ax.yaxis.set_ticklabels(labels);
# Persist the trained model for later reuse.
filename = 'Logistic_Regression_Model.sav'
pickle.dump(classifier, open(filename, 'wb'))
|
[
"asallaamuhsen@gmail.com"
] |
asallaamuhsen@gmail.com
|
11133d30bc9c442a0c421dcf8150ee869dbfdeea
|
ba2edb1a5311f9effc9df37cbb0cb10255ae2679
|
/fft_test.py
|
640d3d882c9b783b818f437f9a9e1ce3ab5701f6
|
[] |
no_license
|
Takusei/SimMch
|
947932801ed75bf73d241e26bce40aa00bc11337
|
ba15a7042b401d8f492e7ac82595422762f4c160
|
refs/heads/master
| 2020-05-30T21:05:55.172384
| 2017-10-24T08:29:16
| 2017-10-24T08:29:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 550
|
py
|
import numpy as np
import matplotlib.pyplot as plt
import scipy.fftpack as fftpack
pi = np.pi
tdata = np.arange(5999.)/300
dt = tdata[1]-tdata[0]
datay = np.sin(pi*tdata)+2*np.sin(pi*2*tdata)
N = len(datay)
fouriery_1 = fftpack.fft(datay)
print len(fouriery_1)
print N
fouriery_2 = np.fft.fft(datay)
parseval_1 = np.sum(datay**2)
parseval_2_1 = np.sum(np.abs(fouriery_1)**2) / N
parseval_2_2 = np.sum(np.abs(fouriery_2)**2) / N
print parseval_1
print parseval_2_1
print parseval_2_2
print parseval_1 - parseval_2_1
print parseval_1 - parseval_2_2
|
[
"kojima@cyb.mei.titech.ac.jp"
] |
kojima@cyb.mei.titech.ac.jp
|
c89abde993e7a4f8133d9df5bc8d6a9445b85ba6
|
bfd5d16aeb3ef1a864a14c6eca1733d9a36f9ab7
|
/.c9/metadata/environment/home/models.py
|
cae08e85b77a1fddade654cf60a9a81161840bb4
|
[] |
no_license
|
Code-Institute-Submissions/dennyshow-milestone-project4-2
|
c0a6e554dc270f9bdcf3e760baff0ae25f58e7e6
|
a4144b9ccf8133c5852c44773aab6ba549218950
|
refs/heads/master
| 2023-08-08T20:45:13.877914
| 2020-01-11T23:19:00
| 2020-01-11T23:19:00
| 233,587,461
| 0
| 0
| null | 2023-07-25T15:41:56
| 2020-01-13T12:10:49
|
Python
|
UTF-8
|
Python
| false
| false
| 2,475
|
py
|
{"filter":false,"title":"models.py","tooltip":"/home/models.py","ace":{"folds":[],"scrolltop":0,"scrollleft":0,"selection":{"start":{"row":6,"column":80},"end":{"row":6,"column":80},"isBackwards":false},"options":{"guessTabSize":true,"useWrapMode":false,"wrapToView":true},"firstLineState":0},"hash":"0f9380a6195731182fc41cd644dd010c73e181c1","undoManager":{"mark":1,"position":1,"stack":[[{"start":{"row":6,"column":49},"end":{"row":6,"column":53},"action":"remove","lines":["True"],"id":2},{"start":{"row":6,"column":49},"end":{"row":6,"column":50},"action":"insert","lines":["F"]},{"start":{"row":6,"column":50},"end":{"row":6,"column":51},"action":"insert","lines":["a"]},{"start":{"row":6,"column":51},"end":{"row":6,"column":52},"action":"insert","lines":["l"]},{"start":{"row":6,"column":52},"end":{"row":6,"column":53},"action":"insert","lines":["s"]},{"start":{"row":6,"column":53},"end":{"row":6,"column":54},"action":"insert","lines":["e"]}],[{"start":{"row":6,"column":49},"end":{"row":6,"column":54},"action":"remove","lines":["False"],"id":3},{"start":{"row":6,"column":49},"end":{"row":6,"column":50},"action":"insert","lines":["T"]},{"start":{"row":6,"column":50},"end":{"row":6,"column":51},"action":"insert","lines":["r"]},{"start":{"row":6,"column":51},"end":{"row":6,"column":52},"action":"insert","lines":["u"]},{"start":{"row":6,"column":52},"end":{"row":6,"column":53},"action":"insert","lines":["e"]}],[{"start":{"row":6,"column":52},"end":{"row":6,"column":53},"action":"remove","lines":["e"],"id":4},{"start":{"row":6,"column":51},"end":{"row":6,"column":52},"action":"remove","lines":["u"]},{"start":{"row":6,"column":50},"end":{"row":6,"column":51},"action":"remove","lines":["r"]},{"start":{"row":6,"column":49},"end":{"row":6,"column":50},"action":"remove","lines":["T"]},{"start":{"row":6,"column":48},"end":{"row":6,"column":49},"action":"remove","lines":["="]},{"start":{"row":6,"column":47},"end":{"row":6,"column":48},"action":"remove","lines":["l"]},{"start":{"row
":6,"column":46},"end":{"row":6,"column":47},"action":"remove","lines":["l"]},{"start":{"row":6,"column":45},"end":{"row":6,"column":46},"action":"remove","lines":["u"]},{"start":{"row":6,"column":44},"end":{"row":6,"column":45},"action":"remove","lines":["n"]},{"start":{"row":6,"column":43},"end":{"row":6,"column":44},"action":"remove","lines":[" "]},{"start":{"row":6,"column":42},"end":{"row":6,"column":43},"action":"remove","lines":[","]}]]},"timestamp":1578505772558}
|
[
"ec2-user@ip-172-31-47-222.ec2.internal"
] |
ec2-user@ip-172-31-47-222.ec2.internal
|
0434e40dad5e3e310407e0c7eb243c7331c04cf2
|
006341ca12525aa0979d6101600e78c4bd9532ab
|
/CMS/Zope-3.2.1/Dependencies/zope.tales-Zope-3.2.1/zope.tales/expressions.py
|
e443f5fefb93370bb1bdb7fea9867f142833f7cc
|
[
"ZPL-2.1",
"Python-2.0",
"ICU",
"LicenseRef-scancode-public-domain",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"ZPL-2.0"
] |
permissive
|
germanfriday/code-examples-sandbox
|
d0f29e20a3eed1f8430d06441ac2d33bac5e4253
|
4c538584703754c956ca66392fdcecf0a0ca2314
|
refs/heads/main
| 2023-05-30T22:21:57.918503
| 2021-06-15T15:06:47
| 2021-06-15T15:06:47
| 377,200,448
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,101
|
py
|
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Basic Page Template expression types.
$Id: expressions.py 40271 2005-11-20 13:35:28Z shh $
"""
import re
from zope.interface import implements
from zope.tales.tales import _valid_name, _parse_expr, NAME_RE, Undefined
from zope.tales.interfaces import ITALESExpression, ITALESFunctionNamespace
Undefs = (Undefined, AttributeError, KeyError, TypeError, IndexError)
_marker = object()
namespace_re = re.compile(r'(\w+):(.+)')
def simpleTraverse(object, path_items, econtext):
    """Traverses a sequence of names, first trying attributes then items.

    Each name is resolved against the current object as an attribute;
    when attribute access fails and the object supports item access, the
    name is looked up as an item instead.  When neither works, the
    AttributeError is allowed to propagate.
    """
    for name in path_items:
        try:
            object = getattr(object, name)
        except AttributeError:
            if hasattr(object, '__getitem__'):
                object = object[name]
            else:
                # Neither attribute nor item access possible: re-raise.
                raise
    return object
class SubPathExpr(object):
    """A single TALES path (e.g. 'a/b/?name/ns:fn') in compiled form.

    The path is split on '/' and compiled into a sequence of:
      * tuples of plain names, resolved with the supplied traverser;
      * bare strings for '?name' elements, resolved from the variables
        at evaluation time;
      * callables for 'ns:fn' function-namespace elements.
    The first plain name becomes the base variable to start from.
    """

    def __init__(self, path, traverser, engine):
        self._traverser = traverser
        self._engine = engine

        # Parse path
        compiledpath = []
        currentpath = []
        for element in str(path).strip().split('/'):
            if not element:
                raise engine.getCompilerError()(
                    'Path element may not be empty in %r' % path)
            if element.startswith('?'):
                # Dynamic element: flush accumulated plain names, then
                # record the variable name to look up at run time.
                if currentpath:
                    compiledpath.append(tuple(currentpath))
                    currentpath = []
                if not _valid_name(element[1:]):
                    raise engine.getCompilerError()(
                        'Invalid variable name "%s"' % element[1:])
                compiledpath.append(element[1:])
            else:
                match = namespace_re.match(element)
                if match:
                    # Function-namespace element 'ns:fn': flush plain
                    # names, push the namespace callable, and queue the
                    # function name as the next plain element.
                    if currentpath:
                        compiledpath.append(tuple(currentpath))
                        currentpath = []
                    namespace, functionname = match.groups()
                    if not _valid_name(namespace):
                        raise engine.getCompilerError()(
                            'Invalid namespace name "%s"' % namespace)
                    try:
                        compiledpath.append(
                            self._engine.getFunctionNamespace(namespace))
                    except KeyError:
                        raise engine.getCompilerError()(
                            'Unknown namespace "%s"' % namespace)
                    currentpath.append(functionname)
                else:
                    currentpath.append(element)
        if currentpath:
            compiledpath.append(tuple(currentpath))

        first = compiledpath[0]
        # BUG FIX: validate the type of the first element *before*
        # subscripting it; subscripting a namespace callable would raise
        # TypeError and mask the intended compiler error below.
        if callable(first):
            # check for initial function
            raise engine.getCompilerError()(
                'Namespace function specified in first subpath element')
        elif isinstance(first, basestring):
            # check for initial ?
            raise engine.getCompilerError()(
                'Dynamic name specified in first subpath element')
        base = first[0]
        if base and not _valid_name(base):
            # BUG FIX: report the offending base name; the original
            # interpolated the stale loop variable `element` here.
            raise engine.getCompilerError()(
                'Invalid variable name "%s"' % base)
        self._base = base
        compiledpath[0] = first[1:]
        self._compiled_path = tuple(compiledpath)

    def _eval(self, econtext,
              isinstance=isinstance):
        """Resolve the compiled path against *econtext* and return the
        final object (without calling it)."""
        vars = econtext.vars

        compiled_path = self._compiled_path

        base = self._base
        if base == 'CONTEXTS' or not base:  # Special base name
            ob = econtext.contexts
        else:
            ob = vars[base]
        # A deferred value is forced as soon as it is used as a base.
        if isinstance(ob, DeferWrapper):
            ob = ob()

        for element in compiled_path:
            if isinstance(element, tuple):
                # Plain names: attribute/item traversal.
                ob = self._traverser(ob, element, econtext)
            elif isinstance(element, basestring):
                # '?name' element: the variable holds a name or a
                # sequence of path names.
                val = vars[element]
                # If the value isn't a string, assume it's a sequence
                # of path names.
                if isinstance(val, basestring):
                    val = (val,)
                ob = self._traverser(ob, val, econtext)
            elif callable(element):
                # Function-namespace element wraps the current object.
                ob = element(ob)
                # TODO: Once we have n-ary adapters, use them.
                if ITALESFunctionNamespace.providedBy(ob):
                    ob.setEngine(econtext)
            else:
                raise ValueError(repr(element))
        return ob
class PathExpr(object):
    """One or more subpath expressions, separated by '|'.

    Subexpressions are tried left to right; the first one that does not
    raise an "undefined" error wins.  The expression-type *name* it was
    registered under ('standard', 'path', 'exists', 'nocall') alters
    call-time behaviour in __call__/_eval.
    """
    implements(ITALESExpression)

    # _default_type_names contains the expression type names this
    # class is usually registered for.
    _default_type_names = (
        'standard',
        'path',
        'exists',
        'nocall',
        )

    def __init__(self, name, expr, engine, traverser=simpleTraverse):
        self._s = expr
        self._name = name
        paths = expr.split('|')
        self._subexprs = []
        add = self._subexprs.append
        for i in range(len(paths)):
            path = paths[i].lstrip()
            if _parse_expr(path):
                # This part is the start of another expression type,
                # so glue it back together and compile it.
                add(engine.compile('|'.join(paths[i:]).lstrip()))
                break
            add(SubPathExpr(path, traverser, engine)._eval)

    def _exists(self, econtext):
        # Return 1 when any subexpression evaluates without an
        # "undefined" error, else 0 (Py2-style booleans).
        for expr in self._subexprs:
            try:
                expr(econtext)
            except Undefs:
                pass
            else:
                return 1
        return 0

    def _eval(self, econtext):
        for expr in self._subexprs[:-1]:
            # Try all but the last subexpression, skipping undefined ones.
            try:
                ob = expr(econtext)
            except Undefs:
                pass
            else:
                break
        else:
            # On the last subexpression allow exceptions through.
            ob = self._subexprs[-1](econtext)
        if self._name == 'nocall':
            return ob
        # Call the object if it is callable.
        if hasattr(ob, '__call__'):
            return ob()
        return ob

    def __call__(self, econtext):
        # 'exists' expressions only report whether the path resolves.
        if self._name == 'exists':
            return self._exists(econtext)
        return self._eval(econtext)

    def __str__(self):
        return '%s expression (%s)' % (self._name, `self._s`)

    def __repr__(self):
        return '<PathExpr %s:%s>' % (self._name, `self._s`)
# Matches '$name' or '${path/with/steps}' inside a string: expression.
_interp = re.compile(r'\$(%(n)s)|\${(%(n)s(?:/[^}]*)*)}' % {'n': NAME_RE})


class StringExpr(object):
    """TALES 'string:' expression with $path interpolation.

    At compile time the expression text becomes a %-format template plus
    a list of compiled path sub-expressions; '$$' escapes a literal
    dollar sign.
    """
    implements(ITALESExpression)

    def __init__(self, name, expr, engine):
        self._s = expr
        if '%' in expr:
            # Escape literal percent signs for the %-formatting in __call__.
            expr = expr.replace('%', '%%')
        self._vars = vars = []
        if '$' in expr:
            # Use whatever expr type is registered as "path".
            path_type = engine.getTypes()['path']
            parts = []
            for exp in expr.split('$$'):
                if parts: parts.append('$')
                m = _interp.search(exp)
                while m is not None:
                    # Text before the match, a '%s' placeholder, and the
                    # compiled path that will fill it in.
                    parts.append(exp[:m.start()])
                    parts.append('%s')
                    vars.append(path_type(
                        'path', m.group(1) or m.group(2), engine))
                    exp = exp[m.end():]
                    m = _interp.search(exp)
                if '$' in exp:
                    raise engine.getCompilerError()(
                        '$ must be doubled or followed by a simple path')
                parts.append(exp)
            expr = ''.join(parts)
        self._expr = expr

    def __call__(self, econtext):
        # Evaluate each embedded path and splice it into the template.
        vvals = []
        for var in self._vars:
            v = var(econtext)
            vvals.append(v)
        return self._expr % tuple(vvals)

    def __str__(self):
        return 'string expression (%s)' % `self._s`

    def __repr__(self):
        return '<StringExpr %s>' % `self._s`
class NotExpr(object):
    """TALES 'not:' expression: boolean-negates a sub-expression."""
    implements(ITALESExpression)

    def __init__(self, name, expr, engine):
        # Compile the wrapped expression once, at definition time.
        self._s = expr = expr.lstrip()
        self._c = engine.compile(expr)

    def __call__(self, econtext):
        # Negate via the engine's boolean rules; return 0/1 (Py2 style).
        return int(not econtext.evaluateBoolean(self._c))

    def __repr__(self):
        return '<NotExpr %s>' % `self._s`
class DeferWrapper(object):
    """Holds a compiled expression and its context; evaluation happens
    only when the wrapper is called (or coerced to a string)."""

    def __init__(self, expr, econtext):
        self._expr = expr          # compiled expression to run later
        self._econtext = econtext  # evaluation context captured now

    def __call__(self):
        return self._expr(self._econtext)

    def __str__(self):
        # Stringification forces evaluation.
        return str(self())
class DeferExpr(object):
    """TALES 'defer:' expression: wraps a compiled expression so it is
    only evaluated when the resulting DeferWrapper is actually used."""
    implements(ITALESExpression)

    def __init__(self, name, expr, compiler):
        # Compile the wrapped expression once, at definition time.
        self._s = expr = expr.lstrip()
        self._c = compiler.compile(expr)

    def __call__(self, econtext):
        # Capture the context now; evaluation is deferred.
        return DeferWrapper(self._c, econtext)

    def __repr__(self):
        return '<DeferExpr %s>' % `self._s`
class SimpleModuleImporter(object):
    """Minimal module importer with no security."""

    def __getitem__(self, module):
        # Resolve a dotted name by importing the top-level module and
        # walking the remaining components as attributes.
        obj = self._get_toplevel_module(module)
        for part in module.split('.')[1:]:
            obj = getattr(obj, part)
        return obj

    def _get_toplevel_module(self, module):
        # This can be overridden to add security proxies.
        return __import__(module)
|
[
"chris@thegermanfriday.com"
] |
chris@thegermanfriday.com
|
a8c478d978ad61025d7b4b83c90d6ef9f1dc4f36
|
e697bb1dd2c96049225b1615df8c613b8c366489
|
/LPII/Atividade 03/banco_test.py
|
357949408125eda7ae581d11a4bd25d248a09696
|
[] |
no_license
|
zTaverna/Cursos-Python
|
9a4f67091c26e59ed0cb1534e72bc855688beb9b
|
28c089229c9b63b2f6739fe52646c6d154a2adc8
|
refs/heads/main
| 2023-05-08T15:51:15.331995
| 2021-06-04T20:27:08
| 2021-06-04T20:27:08
| 365,034,285
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,391
|
py
|
from cliente import Cliente
from conta import Conta
from banco import Banco
# ------------------------------ #
# -- Cliente class tests ------- #
# ------------------------------ #

def test_cria_cliente():
    """A valid Cliente exposes public nome/telefone/email attributes."""
    try:
        c = Cliente('nome', 99999999, 'email@mail.com')
    except Exception:
        assert False, 'Erro ao criar o cliente'
    else:
        assert hasattr(c, 'nome'), 'não criou o atributo público nome'
        assert hasattr(c, 'telefone'), 'não criou o atributo público telefone'
        assert hasattr(c, 'email'), 'não criou o atributo público email'


def test_cria_cliente_telefone_invalido():
    """A non-numeric telefone must raise TypeError."""
    try:
        Cliente('nome', 'não é número', 'email@mail.com')
    except TypeError:
        pass
    except Exception:
        assert False, 'Erro diferente de TypeError para telefone inválido'
    else:
        assert False, 'Criou cliente com telefone inválido'


def test_cria_cliente_email_invalido():
    """An invalid email must raise ValueError."""
    try:
        Cliente('nome', 99999999, 'não é email')
    except ValueError:
        pass
    except Exception:
        assert False, 'Erro diferente de ValueError para email inválido'
    else:
        assert False, 'Criou cliente com email inválido'


def test_cliente_nome():
    """nome is read-only: assignment must fail and leave the value intact."""
    c = Cliente('nome', 99999999, 'email@mail.com')
    assert c.nome == 'nome', 'Atributo nome com valor incorreto'
    try:
        # BUG FIX: the original used '==' (a comparison) instead of '=',
        # so the read-only behaviour of `nome` was never actually
        # exercised and the surrounding try/except was dead code.
        c.nome = 'novo_nome'
    except AttributeError:
        pass
    except Exception:
        assert c.nome == 'nome', 'O atributo nome não pode ser alterado'
    assert c.nome == 'nome', 'O atributo nome não pode ser alterado'


def test_cliente_telefone():
    """telefone is readable and writable with a valid numeric value."""
    c = Cliente('nome', 99999999, 'email@mail.com')
    assert c.telefone == 99999999, 'Atributo telefone com valor incorreto'
    c.telefone = 88888888
    assert c.telefone == 88888888, 'Não atualizou o valor de telefone'


def test_cliente_telefone_erro():
    """Assigning a non-numeric telefone raises TypeError, keeps old value."""
    c = Cliente('nome', 99999999, 'email@mail.com')
    try:
        c.telefone = 'não é telefone'
    except TypeError:
        pass
    except Exception:
        assert False, 'Erro diferente de TypeError para telefone inválido'
    assert c.telefone == 99999999, 'telefone não poderia ser alterado aqui'


def test_cliente_email():
    """email is readable and writable with a valid value."""
    c = Cliente('nome', 99999999, 'email@mail.com')
    assert c.email == 'email@mail.com', 'Atributo email com valor incorreto'
    c.email = 'outro@mail.com'
    assert c.email == 'outro@mail.com', 'Não atualizou o valor de email'


def test_cliente_email_erro():
    """Assigning an invalid email raises ValueError, keeps old value."""
    c = Cliente('nome', 99999999, 'email@mail.com')
    try:
        c.email = 'não é email'
    except ValueError:
        pass
    except Exception:
        assert False, 'Erro diferente de ValueError para email inválido'
    assert c.email == 'email@mail.com', 'email não poderia ser alterado aqui'
# ---------------------------- #
# -- Banco class tests ------- #
# ---------------------------- #

def test_cria_banco():
    """Creating a Banco exposes a public nome attribute."""
    try:
        b = Banco('nome')
    except Exception:
        assert False, 'Erro ao criar o Banco'
    else:
        assert hasattr(b, 'nome'), 'Não criou o atributo público nome'


def test_banco_nome():
    """nome holds the value given at construction."""
    b = Banco('nome')
    assert b.nome == 'nome', 'Atributo nome com valor incorreto'


def test_banco_contas_vazias():
    """A new bank starts with an empty account list."""
    b = Banco('nome')
    assert b.contas == [], (
        'O banco deve começar sem nenhuma conta (lista vazia)')


def test_banco_contas_existentes():
    """abre_conta appends Conta instances to the bank's account list."""
    b = Banco('nome')
    c = Cliente('nome', 99999999, 'email@mail.com')
    b.abre_conta([c], 200)
    b.abre_conta([c], 300)
    assert len(b.contas) == 2, 'O banco deveria ter 2 contas'
    for cc in b.contas:
        assert isinstance(cc, Conta), ('Os elementos da lista de contas '
                                       'devem ser instâncias da classe Conta')


def test_banco_numero_conta():
    """Accounts are numbered sequentially starting at 1."""
    b = Banco('nome')
    c = Cliente('nome', 99999999, 'email@mail.com')
    for _ in range(5):
        b.abre_conta([c], 100)
    ccs = b.contas
    assert len(ccs) == 5, 'O banco deveria ter 5 contas'
    assert all([isinstance(cc, Conta) for cc in ccs]), (
        'Os elementos da lista de contas '
        'devem ser instâncias da classe Conta'
    )
    ordinais = ['primeira', 'segunda', 'terceira', 'quarta', 'quinta']
    msg = 'A {} conta deve ter o numero {}'
    for i, texto in enumerate(ordinais):
        assert ccs[i].numero == i+1, msg.format(texto, i+1)


def test_banco_abre_conta_erro():
    """A negative opening balance raises ValueError and opens no account."""
    b = Banco('nome')
    c = Cliente('nome', 99999999, 'email@mail.com')
    try:
        b.abre_conta([c], -100)
    except ValueError:
        pass
    except Exception:
        assert False, (
            'Erro diferente de ValueError para saldo inicial negativo')
    assert len(b.contas) == 0, 'Abriu uma conta com saldo inicial negativo'
# ---------------------------- #
# -- Conta class tests ------- #
# ---------------------------- #

def test_cria_conta():
    """A valid Conta exposes clientes/numero/saldo public attributes."""
    try:
        c = Cliente('nome', 99999999, 'email@mail.com')
        cc = Conta([c], 1, 0)
    except Exception:
        assert False, 'Erro ao criar conta'
    else:
        assert hasattr(cc, 'clientes'), 'Não criou o atributo público clientes'
        assert hasattr(cc, 'numero'), 'Não criou o atributo público numero'
        assert hasattr(cc, 'saldo'), 'Não criou o atributo público saldo'


def test_conta_clientes():
    """The account keeps the client list it was built with."""
    c = Cliente('nome', 99999999, 'email@mail.com')
    cc = Conta([c], 1, 0)
    clientes = cc.clientes
    assert len(clientes) == 1, 'Esta conta deveria ter apenas 1 cliente'
    assert clientes[0].nome == 'nome', 'Nome do cliente incorreto'
    assert clientes[0].email == 'email@mail.com', 'Email do cliente incorreto'
    assert clientes[0].telefone == 99999999, 'Telefone do cliente incorreto'


def test_conta_numero():
    """numero holds the value given at construction."""
    c = Cliente('nome', 99999999, 'email@mail.com')
    cc = Conta([c], 1, 0)
    assert cc.numero == 1, 'Número da conta incorreto'


def test_conta_saldo():
    """saldo holds the opening balance."""
    c = Cliente('nome', 99999999, 'email@mail.com')
    cc = Conta([c], 1, 100)
    assert cc.saldo == 100, 'Saldo da conta incorreto'


def test_conta_deposito():
    """depositar increases the balance and is recorded in the statement."""
    c = Cliente('nome', 99999999, 'email@mail.com')
    cc = Conta([c], 1, 100)
    cc.depositar(200)
    assert cc.saldo == 300, 'Saldo da conta incorreto'
    assert ('deposito', 200) in cc.tirar_extrato(), (
        'Depósito não registrado no extrato')


def test_conta_saque():
    """sacar decreases the balance and is recorded in the statement."""
    c = Cliente('nome', 99999999, 'email@mail.com')
    cc = Conta([c], 1, 100)
    cc.sacar(50)
    assert cc.saldo == 50, 'Saldo da conta incorreto'
    assert ('saque', 50) in cc.tirar_extrato(), (
        'Saque não registrado no extrato')


def test_conta_saque_erro():
    """Overdrawing raises ValueError, leaving balance and statement alone."""
    c = Cliente('nome', 99999999, 'email@mail.com')
    cc = Conta([c], 1, 100)
    try:
        cc.sacar(150)
    except ValueError:
        assert cc.saldo == 100, (
            'O saldo não deve ser alterado quando o saque for inválido')
        assert ('saque', 150) not in cc.tirar_extrato(), (
            'Um saque inválido não deve ser registrado no extrato')
    except Exception:
        assert False, 'Erro diferente de ValueError para saque inválido'
    else:
        assert False, 'Permitiu a realização de um saque inválido'


def test_conta_extrato():
    """A fresh account's statement holds only the opening balance entry."""
    c = Cliente('nome', 99999999, 'email@mail.com')
    cc = Conta([c], 1, 100)
    extrato = cc.tirar_extrato()
    assert type(extrato) == list, 'O extrato deve ser uma lista'
    assert len(extrato) == 1, (
        'O extrato deve conter apenas uma entrada para esse teste')
    assert ('saldo inicial', 100) in extrato, (
        'Saque inicial não registrado no extrato')


def test_conta_extrato_2():
    """Statement entries appear in chronological order."""
    c = Cliente('nome', 99999999, 'email@mail.com')
    cc = Conta([c], 1, 200)
    cc.sacar(150)
    cc.depositar(250)
    extrato = cc.tirar_extrato()
    # BUG FIX: the failure message claimed 'duas entradas' (two entries)
    # while the assertion requires three.
    assert len(extrato) == 3, (
        'O extrato deve conter três entradas para esse teste')
    assert extrato[0] == ('saldo inicial', 200), (
        'A primeira entrada está incorreta')
    assert extrato[1] == ('saque', 150), (
        'A segunda entrada está incorreta')
    assert extrato[2] == ('deposito', 250), (
        'A terceira entrada está incorreta')
|
[
"noreply@github.com"
] |
zTaverna.noreply@github.com
|
4414e887a8a0068d38d7683dd44cb8232ff8b399
|
2c33ee7ea716fe212daebeddf97e255ba419fdbe
|
/ds/linkedlist.py
|
b881c83d617debb4459f0f0170040a85401600ef
|
[] |
no_license
|
adilreza/Competitive-Programming-problem-solving
|
1ef29b6a927d04d573e05aadb8837e0cd6eed26c
|
f96863e3cbd10d4047936088cecb91007b0331d4
|
refs/heads/master
| 2022-08-21T07:44:09.635007
| 2022-07-25T17:23:29
| 2022-07-25T17:23:29
| 213,891,416
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 503
|
py
|
class Node:
    """A node of a singly linked list."""

    def __init__(self, data):
        self.data = data   # payload
        self.next = None   # following node, or None at the tail


class LinkedList:
    """Minimal singly linked list: only holds the head reference."""

    def __init__(self):
        self.head = None


if __name__ == '__main__':
    # BUG FIX: the original did `linked_list = LinkedList` (no parentheses),
    # binding the class itself instead of an instance, so `head` became a
    # mutated *class* attribute shared by all future instances.
    linked_list = LinkedList()
    linked_list.head = Node(1)
    second = Node(2)
    third = Node(3)

    # connect nodes
    linked_list.head.next = second
    second.next = third

    # Traverse and print; note this consumes the list by advancing head.
    while linked_list.head is not None:
        print(linked_list.head.data, end=",")
        linked_list.head = linked_list.head.next
|
[
"noreply@github.com"
] |
adilreza.noreply@github.com
|
aa2efaabcafda410600f0fd2d7c7768078daf997
|
fadcf8c03dfeede5f3401263be9c7bdac91a815b
|
/radio button.py
|
267e3a85a47242b100a90296feb2d15e55c75665
|
[] |
no_license
|
Anjalkhadka/GUIproject
|
20ae867eed1a10e36a46363e216fff4e9e58cda3
|
42b03df95b72686f3cf51170a40b167508720dab
|
refs/heads/master
| 2023-07-01T16:28:49.763623
| 2021-08-02T14:43:37
| 2021-08-02T14:43:37
| 385,206,255
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 431
|
py
|
from tkinter import *

root = Tk()

# (label text, variable value) pairs for the radio buttons.
HOTEL = [
    ("Momo", "Momo"),
    ("cheese", "cheese"),
    ("Mushroom", "Mushroom"),
    ("Pizza", "Pizza")
]

Food = StringVar()
Food.set("Momo")  # default selection

for text, hotel in HOTEL:
    # BUG FIX: the original created the Radiobuttons but never added them
    # to a layout, so they were never displayed.
    Radiobutton(root, text=text, variable=Food, value=hotel).pack()


def clicked(value):
    """Append a label showing the currently selected value."""
    myLabel = Label(root, text=value)
    myLabel.pack()


# BUG FIX: .pack() returns None; keep widget creation and the geometry
# call separate so `myButton` actually refers to the Button.
myButton = Button(root, text="click", command=lambda: clicked(Food.get()))
myButton.pack()

root.mainloop()
|
[
"Anjalkhadka"
] |
Anjalkhadka
|
4752ed2589db2285e40e377754a03f021ff3f54a
|
550f9357b5a043c2567dc0f4f83127d14bd22d46
|
/Entrega7/Entrega7_JP.py
|
2be7cbab3353b87c58bf3415b0b76a402d1bd271
|
[] |
no_license
|
juansilva1/MCOC2020-P1
|
175eb828f79729a1d75aea3db6e93d4386bb16d7
|
42f8bee5fada344c088df84746d1dd3af974526b
|
refs/heads/master
| 2022-12-12T00:40:51.129650
| 2020-09-11T16:19:11
| 2020-09-11T16:19:11
| 290,047,057
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,280
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 6 19:55:25 2020
@author: jpsil
"""
import numpy as np
import scipy as sp
from scipy.integrate import odeint
import math as mt
import warnings
warnings.filterwarnings("ignore")
from matplotlib.pylab import *
from leer_eof import leer_eof
from matplotlib import pyplot as plt
from sys import argv
fname = argv[1]   # EOF ephemeris file passed on the command line
# presumably time plus position/velocity component histories — verify
# against leer_eof's implementation.
t, x, y, z, vx, vy, vz = leer_eof(fname)

# Initial state vector (first ephemeris sample)
vector_inicial_real=np.array([x[0],y[0],z[0],vx[0],vy[0],vz[0]])
# Final state vector (last ephemeris sample)
vector_final_real=np.array([x[-1],y[-1],z[-1],vx[-1],vy[-1],vz[-1]])

# Physical constants
radio_tierra=6378136.3 #m
masa_tierra=(5.97219*(10**24)) #kg
G=(6.67392*(10**-11))# m3/s2kg
omega=7.2921150e-5# rad/s (Earth rotation rate)
# Unit-conversion helpers: km^3, km^5, km^6 expressed in m^n.
km3=(1000.)**3
km5=(1000.)**5
km6=(1000.)**6
mu=398600.4415*km3   # gravitational parameter in m^3/s^2
# print(mu)
# mu=G*masa_tierra
# print(mu)
# NOTE(review): J2/J3 appear pre-scaled for the force expressions below;
# confirm the scaling convention before reuse.
J2=1.7555280486257942e+25
J3=-2.6191328602512207e+29
def bala(z,t):
c=np.cos(omega*t)
s=np.sin(omega*t)
Rs=np.array([[c, -s, 0],
[s, c, 0],
[0, 0, 1]])
Rp=omega*np.array([[-s, -c, 0],
[c, -s, 0],
[0, 0, 0]])
Rpp=(omega**2)*np.array([[-c, s, 0],
[-s, -c, 0],
[0, 0, 0]])
x=z[0:3]
xp=z[3:6]
r=np.sqrt(np.dot(x,x))
xstill=Rs@x
rnorm=xstill/r
Fg=-(mu/r**2)*rnorm
z2=xstill[2]**2
rflat=xstill[0]**2+xstill[1]**2
Fx= -5.26658414587738e+25*xstill[0]*xstill[2]**2*(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(-3.5) - 3.0*xstill[0]*(2.63329207293869e+25*xstill[2]**2/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2) - 8.77764024312897e+24)*(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(-2.5) - 4.0*xstill[0]*(-6.54783215062805e+29*xstill[2]**3/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(3/2) + 3.92869929037683e+29*xstill[2]/sqrt(xstill[0]**2 + xstill[1]**2 + xstill[2]**2))*(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(-3.0) - 9.0*xstill[0]*(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(-5.5)*(-1.1114550082219e+64*xstill[2]**8/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**4 + 2.07471601534756e+64*xstill[2]**6/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**3 - 1.1969515473159e+64*xstill[2]**4/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**2 + 2.17627554057436e+63*xstill[2]**2/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2) - 6.04520983492878e+61) - 8.0*xstill[0]*(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(-5.0)*(-1.61248026818891e+57*xstill[2]**7/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(7/2) + 2.60477581784362e+57*xstill[2]**5/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(5/2) - 1.18398900811074e+57*xstill[2]**3/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(3/2) + 1.31554334234526e+56*xstill[2]/sqrt(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)) - 7.0*xstill[0]*(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(-4.5)*(2.09075678222103e+50*xstill[2]**6/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**3 - 2.85103197575595e+50*xstill[2]**4/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**2 + 9.50343991918651e+49*xstill[2]**2/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2) - 4.525447580565e+48) - 6.0*xstill[0]*(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(-4.0)*(-7.54485777124404e+42*xstill[2]**5/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(5/2) + 8.38317530138227e+42*xstill[2]**3/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(3/2) - 1.79639470743906e+42*xstill[2]/sqrt(xstill[0]**2 + 
xstill[1]**2 + xstill[2]**2)) - 5.0*xstill[0]*(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(-3.5)*(-4.67333271026623e+36*xstill[2]**4/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**2 + 4.00571375165677e+36*xstill[2]**2/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2) - 4.00571375165677e+35) + (1.96434964518842e+30*xstill[0]*xstill[2]**3/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(5/2) - 3.92869929037683e+29*xstill[0]*xstill[2]/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(3/2))*(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(-2.0) + (1.86933308410649e+37*xstill[0]*xstill[2]**4/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**3 - 8.01142750331354e+36*xstill[0]*xstill[2]**2/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**2)*(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(-2.5) + (xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(-4.5)*(8.89164006577524e+64*xstill[0]*xstill[2]**8/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**5 - 1.24482960920853e+65*xstill[0]*xstill[2]**6/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**4 + 4.78780618926359e+64*xstill[0]*xstill[2]**4/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**3 - 4.35255108114872e+63*xstill[0]*xstill[2]**2/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**2) + (xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(-4.0)*(1.12873618773224e+58*xstill[0]*xstill[2]**7/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(9/2) - 1.30238790892181e+58*xstill[0]*xstill[2]**5/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(7/2) + 3.55196702433221e+57*xstill[0]*xstill[2]**3/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(5/2) - 1.31554334234526e+56*xstill[0]*xstill[2]/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(3/2)) + (xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(-3.5)*(-1.25445406933262e+51*xstill[0]*xstill[2]**6/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**4 + 1.14041279030238e+51*xstill[0]*xstill[2]**4/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**3 - 1.9006879838373e+50*xstill[0]*xstill[2]**2/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**2) + 
(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(-3.0)*(3.77242888562202e+43*xstill[0]*xstill[2]**5/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(7/2) - 2.51495259041468e+43*xstill[0]*xstill[2]**3/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(5/2) + 1.79639470743906e+42*xstill[0]*xstill[2]/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(3/2))
Fy= -5.26658414587738e+25*xstill[1]*xstill[2]**2*(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(-3.5) - 3.0*xstill[1]*(2.63329207293869e+25*xstill[2]**2/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2) - 8.77764024312897e+24)*(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(-2.5) - 4.0*xstill[1]*(-6.54783215062805e+29*xstill[2]**3/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(3/2) + 3.92869929037683e+29*xstill[2]/sqrt(xstill[0]**2 + xstill[1]**2 + xstill[2]**2))*(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(-3.0) - 9.0*xstill[1]*(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(-5.5)*(-1.1114550082219e+64*xstill[2]**8/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**4 + 2.07471601534756e+64*xstill[2]**6/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**3 - 1.1969515473159e+64*xstill[2]**4/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**2 + 2.17627554057436e+63*xstill[2]**2/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2) - 6.04520983492878e+61) - 8.0*xstill[1]*(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(-5.0)*(-1.61248026818891e+57*xstill[2]**7/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(7/2) + 2.60477581784362e+57*xstill[2]**5/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(5/2) - 1.18398900811074e+57*xstill[2]**3/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(3/2) + 1.31554334234526e+56*xstill[2]/sqrt(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)) - 7.0*xstill[1]*(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(-4.5)*(2.09075678222103e+50*xstill[2]**6/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**3 - 2.85103197575595e+50*xstill[2]**4/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**2 + 9.50343991918651e+49*xstill[2]**2/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2) - 4.525447580565e+48) - 6.0*xstill[1]*(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(-4.0)*(-7.54485777124404e+42*xstill[2]**5/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(5/2) + 8.38317530138227e+42*xstill[2]**3/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(3/2) - 1.79639470743906e+42*xstill[2]/sqrt(xstill[0]**2 + 
xstill[1]**2 + xstill[2]**2)) - 5.0*xstill[1]*(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(-3.5)*(-4.67333271026623e+36*xstill[2]**4/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**2 + 4.00571375165677e+36*xstill[2]**2/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2) - 4.00571375165677e+35) + (1.96434964518842e+30*xstill[1]*xstill[2]**3/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(5/2) - 3.92869929037683e+29*xstill[1]*xstill[2]/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(3/2))*(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(-2.0) + (1.86933308410649e+37*xstill[1]*xstill[2]**4/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**3 - 8.01142750331354e+36*xstill[1]*xstill[2]**2/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**2)*(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(-2.5) + (xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(-4.5)*(8.89164006577524e+64*xstill[1]*xstill[2]**8/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**5 - 1.24482960920853e+65*xstill[1]*xstill[2]**6/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**4 + 4.78780618926359e+64*xstill[1]*xstill[2]**4/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**3 - 4.35255108114872e+63*xstill[1]*xstill[2]**2/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**2) + (xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(-4.0)*(1.12873618773224e+58*xstill[1]*xstill[2]**7/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(9/2) - 1.30238790892181e+58*xstill[1]*xstill[2]**5/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(7/2) + 3.55196702433221e+57*xstill[1]*xstill[2]**3/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(5/2) - 1.31554334234526e+56*xstill[1]*xstill[2]/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(3/2)) + (xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(-3.5)*(-1.25445406933262e+51*xstill[1]*xstill[2]**6/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**4 + 1.14041279030238e+51*xstill[1]*xstill[2]**4/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**3 - 1.9006879838373e+50*xstill[1]*xstill[2]**2/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**2) + 
(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(-3.0)*(3.77242888562202e+43*xstill[1]*xstill[2]**5/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(7/2) - 2.51495259041468e+43*xstill[1]*xstill[2]**3/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(5/2) + 1.79639470743906e+42*xstill[1]*xstill[2]/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(3/2))
Fz= -3.0*xstill[2]*(2.63329207293869e+25*xstill[2]**2/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2) - 8.77764024312897e+24)*(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(-2.5) - 4.0*xstill[2]*(-6.54783215062805e+29*xstill[2]**3/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(3/2) + 3.92869929037683e+29*xstill[2]/sqrt(xstill[0]**2 + xstill[1]**2 + xstill[2]**2))*(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(-3.0) - 9.0*xstill[2]*(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(-5.5)*(-1.1114550082219e+64*xstill[2]**8/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**4 + 2.07471601534756e+64*xstill[2]**6/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**3 - 1.1969515473159e+64*xstill[2]**4/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**2 + 2.17627554057436e+63*xstill[2]**2/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2) - 6.04520983492878e+61) - 8.0*xstill[2]*(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(-5.0)*(-1.61248026818891e+57*xstill[2]**7/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(7/2) + 2.60477581784362e+57*xstill[2]**5/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(5/2) - 1.18398900811074e+57*xstill[2]**3/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(3/2) + 1.31554334234526e+56*xstill[2]/sqrt(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)) - 7.0*xstill[2]*(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(-4.5)*(2.09075678222103e+50*xstill[2]**6/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**3 - 2.85103197575595e+50*xstill[2]**4/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**2 + 9.50343991918651e+49*xstill[2]**2/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2) - 4.525447580565e+48) - 6.0*xstill[2]*(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(-4.0)*(-7.54485777124404e+42*xstill[2]**5/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(5/2) + 8.38317530138227e+42*xstill[2]**3/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(3/2) - 1.79639470743906e+42*xstill[2]/sqrt(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)) - 5.0*xstill[2]*(xstill[0]**2 + xstill[1]**2 + 
xstill[2]**2)**(-3.5)*(-4.67333271026623e+36*xstill[2]**4/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**2 + 4.00571375165677e+36*xstill[2]**2/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2) - 4.00571375165677e+35) + (-5.26658414587738e+25*xstill[2]**3/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**2 + 5.26658414587738e+25*xstill[2]/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2))*(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(-1.5) + (xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(-4.5)*(8.89164006577524e+64*xstill[2]**9/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**5 - 2.13399361578606e+65*xstill[2]**7/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**4 + 1.72361022813489e+65*xstill[2]**5/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**3 - 5.22306129737846e+64*xstill[2]**3/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**2 + 4.35255108114872e+63*xstill[2]/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)) + (xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(-4.0)*(1.12873618773224e+58*xstill[2]**8/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(9/2) - 2.43112409665405e+58*xstill[2]**6/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(7/2) + 1.65758461135503e+58*xstill[2]**4/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(5/2) - 3.68352135856674e+57*xstill[2]**2/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(3/2) + 1.31554334234526e+56/sqrt(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)) + (xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(-3.5)*(-1.25445406933262e+51*xstill[2]**7/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**4 + 2.394866859635e+51*xstill[2]**5/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**3 - 1.33048158868611e+51*xstill[2]**3/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**2 + 1.9006879838373e+50*xstill[2]/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)) + (xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(-3.0)*(3.77242888562202e+43*xstill[2]**6/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(7/2) - 6.2873814760367e+43*xstill[2]**4/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(5/2) + 
2.69459206115859e+43*xstill[2]**2/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(3/2) - 1.79639470743906e+42/sqrt(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)) + (xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(-2.5)*(1.86933308410649e+37*xstill[2]**5/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**3 - 2.67047583443785e+37*xstill[2]**3/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**2 + 8.01142750331354e+36*xstill[2]/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)) + (xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(-2.0)*(1.96434964518842e+30*xstill[2]**4/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(5/2) - 2.3572195742261e+30*xstill[2]**2/(xstill[0]**2 + xstill[1]**2 + xstill[2]**2)**(3/2) + 3.92869929037683e+29/sqrt(xstill[0]**2 + xstill[1]**2 + xstill[2]**2))
# xr=xstill
JNN=-np.array([Fx,Fy,Fz])
zp=np.zeros(6)
zp[0:3]=xp
zp[3:6]=Rs.T@(Fg+JNN-(2*Rp@xp+Rpp@x))
return zp
from matplotlib.pylab import *
from scipy.integrate import odeint
import datetime as dt
from leer_eof import leer_eof
import numpy as np
# Read from the command line (kept for reference; the in-file path below is used instead)
"""
#leer de la linea de comando
from sys import argv
eofname=argv[1]
"""
import xml
import xml.etree.ElementTree as ET
from numpy import zeros
import datetime as dt
# Read from terminal: `fname` is assumed to be defined earlier in this file.
eofname=fname
# Reference orbit state vectors parsed from the EOF file:
# time plus position (x, y, z) and velocity (vx, vy, vz) samples.
sat_t,sat_x,sat_y,sat_z,sat_vx,sat_vy,sat_vz = leer_eof(eofname)
# Initial condition: the first position/velocity sample of the reference orbit.
z0=[sat_x[0],sat_y[0],sat_z[0],sat_vx[0],sat_vy[0],sat_vz[0]]
# Integrate the equations of motion; `bala` (RHS) and `t` (time grid)
# are defined earlier in this file -- TODO confirm `t` matches sat_t.
vector = odeint(bala,z0,t)
# print(sol)
# Unpack the integrated trajectory into position and velocity components.
x = vector[:,0]
y = vector[:,1]
z = vector[:,2]
vx = vector[:,3]
vy = vector[:,4]
vz = vector[:,5]
"""
FUNCIONA
"""
from matplotlib.pylab import *
from scipy.integrate import odeint
import datetime as dt
from leer_eof import leer_eof
import numpy as np
# Read the EOF file name from the command line.
from sys import argv
archivo = argv[1]
# `fname` is defined earlier in this file; the command-line value above is
# immediately overwritten by it (kept to preserve existing behavior).
archivo = open(f'{fname}')
string_name = str(fname)
# The predicted-ephemeris output keeps the input name, swapping the extension.
archivo_final = string_name.replace('.EOF', '.PRED')
archivo1 = open(archivo_final, 'w')
contador = 0
# Copy the EOF file line by line, replacing each state-vector tag with the
# value predicted by the integrator (x, y, z, vx, vy, vz computed above).
# NOTE(review): `contador` advances once per *line*, not once per <OSV>
# record, so consecutive X/Y/Z/VX/VY/VZ tags are indexed with different
# counters -- confirm this matches how x, y, z, ... were sampled.
for line in archivo:
    cambio = line
    if '<X unit="m"' in line:
        cambio = line.replace(line, (' ' * 6 + '<X unit="m">' + str(x[contador]) + '</X>' + "\n"))
    elif '<Y unit="m"' in line:
        cambio = line.replace(line, (' ' * 6 + '<Y unit="m">' + str(y[contador]) + '</Y>' + "\n"))
    elif '<Z unit="m"' in line:
        cambio = line.replace(line, (' ' * 6 + '<Z unit="m">' + str(z[contador]) + '</Z>' + "\n"))
    elif '<VX unit="m/s"' in line:
        # Bug fix: velocity tags are matched as unit="m/s" but were written
        # back as unit="m", corrupting the output file's units.
        cambio = line.replace(line, (' ' * 6 + '<VX unit="m/s">' + str(vx[contador]) + '</VX>' + "\n"))
    elif '<VY unit="m/s"' in line:
        cambio = line.replace(line, (' ' * 6 + '<VY unit="m/s">' + str(vy[contador]) + '</VY>' + "\n"))
    elif '<VZ unit="m/s"' in line:
        cambio = line.replace(line, (' ' * 6 + '<VZ unit="m/s">' + str(vz[contador]) + '</VZ>' + "\n"))
    contador += 1
    archivo1.write(cambio)
archivo.close()
# Bug fix: the output file was never closed, so the tail of the buffered
# output could be lost.
archivo1.close()
|
[
"noreply@github.com"
] |
juansilva1.noreply@github.com
|
bb440e15cc1e0b4db0b4460b511af4f601929c7f
|
c8860354053b56e1f24ebe642ccdaf7116c141e0
|
/javascript/js-day08/index.py
|
c8ed86ed00a35bcd38830bfec329d2f80ae40d5c
|
[] |
no_license
|
wangsiman0419/notebooks
|
fa4e3f0d55f78fe70083ac04c95fe57cc7443389
|
a867efac9a5a49bc69a36ade71283e91912484a2
|
refs/heads/master
| 2023-01-13T18:24:41.447845
| 2020-02-25T06:00:13
| 2020-02-25T06:00:13
| 242,922,346
| 9
| 0
| null | 2023-01-07T15:14:14
| 2020-02-25T05:58:16
|
HTML
|
UTF-8
|
Python
| false
| false
| 24
|
py
|
{
var a:number=10;
}
|
[
"1713411498@qq.com"
] |
1713411498@qq.com
|
d2b403689a68070e761b5d1f555b68ec5ea11d8b
|
4542db1d4955aaf7c53c9ff7282d064a066ff393
|
/2020/December/18-Dec/cached properties.py
|
83642392c637a53b08864ec08c3a4b3c317761f1
|
[] |
no_license
|
mohanbabu2706/100
|
7227527b0e0af1e4f69d194b7537c7aef27a810d
|
3c5a8b769fd4205afb3e3fd7e9cbf2ebf053b7b9
|
refs/heads/master
| 2023-02-20T09:56:45.970290
| 2021-01-20T10:09:09
| 2021-01-20T10:09:09
| 297,233,598
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,143
|
py
|
import time
class cached_property(object):
    """Decorator for read-only properties evaluated only once within a TTL period.

    It can be used to create a cached property like this::

        import random

        # the class containing the property must be a new-style class
        class MyClass(object):
            # create a property whose value is cached for ten minutes
            @cached_property(ttl=600)
            def randint(self):
                # will only be evaluated every 10 min. at maximum.
                return random.randint(0, 100)

    The value is cached in the '_cache' attribute of the object instance that
    has the property getter method wrapped by this decorator. The '_cache'
    attribute value is a dictionary which has a key for every property of the
    object which is wrapped by this decorator. Each entry in the cache is
    created only when the property is accessed for the first time and is a
    two-element tuple with the last computed property value and the last time
    it was updated in seconds since the epoch.

    The default time-to-live (TTL) is 300 seconds (5 minutes). Set the TTL to
    zero for the cached value to never expire.

    To expire a cached property value manually just do::

        del instance._cache[<property name>]
    """

    def __init__(self, ttl=300):
        # ttl: seconds a cached value stays fresh; 0 means it never expires.
        self.ttl = ttl

    def __call__(self, fget, doc=None):
        # Invoked when the decorator is applied to the getter: store it and
        # copy its metadata so introspection on the property still works.
        self.fget = fget
        self.__doc__ = doc or fget.__doc__
        self.__name__ = fget.__name__
        self.__module__ = fget.__module__
        return self

    def __get__(self, inst, owner):
        # Robustness fix: accessing the property on the class itself
        # (inst is None) returns the descriptor, mirroring `property`,
        # instead of crashing on `None._cache` / fget(None).
        if inst is None:
            return self
        now = time.time()
        try:
            value, last_update = inst._cache[self.__name__]
            if self.ttl > 0 and now - last_update > self.ttl:
                # Cached entry is stale -- fall through to recompute it.
                raise AttributeError
        except (KeyError, AttributeError):
            value = self.fget(inst)
            try:
                cache = inst._cache
            except AttributeError:
                # First cached-property access on this instance.
                cache = inst._cache = {}
            cache[self.__name__] = (value, now)
        return value
|
[
"noreply@github.com"
] |
mohanbabu2706.noreply@github.com
|
e933f91edd2539de5be5866fa1bb783d5b208c95
|
454cf082f0fccac3d957ffd27f746a177995f559
|
/exercise02/exercise02.py
|
5239a3e9b88895bd233584ff48f8fa16e5954a0d
|
[] |
no_license
|
bernardoVale/python-exercices
|
414a4bf6e0a30b6afe605e15a35aa33b684d6e56
|
392ebc03c975dcb02c6d3b7ac17374507bdb979d
|
refs/heads/master
| 2021-06-30T15:30:41.216262
| 2017-09-21T12:53:26
| 2017-09-21T12:53:26
| 103,652,370
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 274
|
py
|
def grep(pattern, content):
    """Return the lines of *content* that contain *pattern*.

    Mimics a minimal `grep`: *content* is split on newlines and every line
    in which *pattern* occurs as a plain substring (not a regex) is kept,
    in original order.

    :param pattern: substring to search for
    :param content: text to search, lines separated by '\\n'
    :return: list of matching lines (without trailing newlines)
    """
    # The commented-out manual append loop was removed; the comprehension
    # below is the equivalent, idiomatic form.
    return [line for line in content.split('\n') if pattern in line]
|
[
"bvale@avenuecode.com"
] |
bvale@avenuecode.com
|
932d546ce883e182b14722823585c21e1e829228
|
f8e3a703ab7198f4c61e5ba630b4f006850db0db
|
/03_dash_core/09_markdown.py
|
0c7dae779a47758e88b5e94f1510314c27954e7b
|
[
"MIT"
] |
permissive
|
krakowiakpawel9/dash-course
|
238a09e92821c4d0a78f4cc9efe1e4dd0b004623
|
6f22d3bf8c339217e5a6c22b5567f1e67ecc2e31
|
refs/heads/master
| 2021-06-24T02:23:47.463648
| 2021-03-06T09:54:32
| 2021-03-06T09:54:32
| 208,616,684
| 1
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,435
|
py
|
"""
@author: krakowiakpawel9@gmail.com
@site: e-smartdata.org
"""
import dash
import dash_core_components as dcc
import dash_html_components as html
import plotly.graph_objects as go
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
markdown="""
Nagłówki
# H1
## H2
### H3
#### H4
##### H5
###### H6
Znaczniki tekstu:
Kursywa: *tekst kursywą* lub _tekst kursywa_
Pogrubienie: **Tekst pogrubiony** lub __tekst pogrubiony__
Kursywa i pogrubienie: **pogrubienie i _kursywa_**
Przekreślenie: ~~Przekreślenie~~
Listy:
Lista uporządkowana:
1. Python
2. SQL
3. Java
Lista nieuporządkowana:
* Python
* SQL
* Java
Linkowanie
[Google.com](http://www.google.com)
Kod
Użyj `print('Hello World')`
Blok kodu
```
import numpy as np
x = np.random.randn(100)
print(x)
```
```
SELECT * FROM products;
```
Table:
|UserID |Rating |Age|
|---------|----------|---|
|001 |4.5 |23 |
|002 |5 |34 |
Cytowanie:
> Python jest bardzo poręczny i łatwy do nauki.
Lorem Ipsum jest tekstem stosowanym jako przykładowy wypełniacz w przemyśle poligraficznym.
Został po raz pierwszy użyty w XV w. przez nieznanego drukarza do wypełnienia tekstem próbnej książki.
Pięć wieków później zaczął być używany przemyśle elektronicznym, pozostając praktycznie niezmienionym.
Spopularyzował się w latach 60. XX w. wraz z publikacją arkuszy Letrasetu, zawierających fragmenty
Lorem Ipsum, a ostatnio z zawierającym różne wersje Lorem Ipsum oprogramowaniem przeznaczonym do
realizacji druków na komputerach osobistych, jak Aldus PageMaker
> Lorem Ipsum jest tekstem stosowanym jako przykładowy wypełniacz w przemyśle poligraficznym.
Został po raz pierwszy użyty w XV w. przez nieznanego drukarza do wypełnienia tekstem próbnej książki.
Pięć wieków później zaczął być używany przemyśle elektronicznym, pozostając praktycznie niezmienionym.
Spopularyzował się w latach 60. XX w. wraz z publikacją arkuszy Letrasetu, zawierających fragmenty
Lorem Ipsum, a ostatnio z zawierającym różne wersje Lorem Ipsum oprogramowaniem przeznaczonym do
realizacji druków na komputerach osobistych, jak Aldus PageMaker
Linie horyzontalne
---
***
"""
app.layout = html.Div([
dcc.Markdown(markdown)
])
if __name__ == '__main__':
app.run_server(debug=True)
|
[
"krakowiakpawel9@gmail.com"
] |
krakowiakpawel9@gmail.com
|
3b5a9f66bed96258c8c57f579b82332a9de54aee
|
efcadad286cda42766368e0e4723960382f0dcf2
|
/test/test_api_response_zacks_sales_surprises.py
|
024cf3720367727d924cf82308ae9d16b42f1b97
|
[] |
no_license
|
ryanleland/python-sdk
|
5056016dadd1088f7fa6f54f57473015ff8f1c5b
|
62504257bac27d99a59215f6b95bdd0b73461e4b
|
refs/heads/master
| 2020-05-24T18:03:41.215215
| 2019-05-17T21:54:07
| 2019-05-17T21:54:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,452
|
py
|
# coding: utf-8
"""
Intrinio API
Welcome to the Intrinio API! Through our Financial Data Marketplace, we offer a wide selection of financial data feed APIs sourced by our own proprietary processes as well as from many data vendors. For a complete API request / response reference please view the [Intrinio API documentation](https://intrinio.com/documentation/api_v2). If you need additional help in using the API, please visit the [Intrinio website](https://intrinio.com) and click on the chat icon in the lower right corner. # noqa: E501
OpenAPI spec version: 2.6.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import intrinio_sdk
from intrinio_sdk.models.api_response_zacks_sales_surprises import ApiResponseZacksSalesSurprises # noqa: E501
from intrinio_sdk.rest import ApiException
class TestApiResponseZacksSalesSurprises(unittest.TestCase):
    """ApiResponseZacksSalesSurprises unit test stubs"""

    def setUp(self):
        # No fixtures needed yet; the single test below is still a stub.
        pass

    def tearDown(self):
        pass

    def testApiResponseZacksSalesSurprises(self):
        """Test ApiResponseZacksSalesSurprises"""
        # FIXME: construct object with mandatory attributes with example values
        # model = intrinio_sdk.models.api_response_zacks_sales_surprises.ApiResponseZacksSalesSurprises()  # noqa: E501
        pass


if __name__ == '__main__':
    unittest.main()
|
[
"kgmillerdev@gmail.com"
] |
kgmillerdev@gmail.com
|
58713acbcb618bafad559d590e6798f41d3d017b
|
a6f4701ac1d0e5b922961ee24f7d0ca8e2aca75b
|
/constants.py
|
895b7a5a09b8e2feb8ef21968da8107b0fac9bca
|
[
"MIT"
] |
permissive
|
MFarelS/ID_AzanBot
|
ce5efebfe3524a52af7ac2f8dddbe0ecbf677ee8
|
e48bff507ade5e811a6bbac2bfe8615448b09e83
|
refs/heads/master
| 2022-01-05T04:31:50.167809
| 2019-05-05T00:02:14
| 2019-05-05T00:02:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,453
|
py
|
# Runtime configuration constants for ID_AzanBot.

#Maximum number of messages to send before pushing the job to the next 1 sec
MAX_MESSAGE = 25
#Lead Time of UTC in minutes
LEADTIME = 1
#Latency Limit
LATENCY_LIMIT = 30.0
#Job Limit
JOB_LIMIT = 50.0
#StoreBot review URL
STOREBOT_REVIEW_URL = 'https://telegram.me/storebot?start=id_azanbot'
#KEMENAG file directory
KEMENAG_DIR = '/home/untorojati/python/id_azanbot/src/'
#LOG file directory
LOG_DIR = '/home/untorojati/python/id_azanbot/log/'
#Local Day Name (Indonesian, Monday first)
LOCAL_DAY = ['Senin', 'Selasa', 'Rabu', 'Kamis', 'Jumat', 'Sabtu', 'Ahad']
#Local Month Name (index 0 is a 'None' placeholder so months are 1-based)
LOCAL_MONTH = ['None', 'Januari', 'Februari', 'Maret', 'April', 'Mei', 'Juni', 'Juli', 'Agustus', 'September', 'Oktober', 'November', 'Desember']
#Hijri Month Name (index 0 is a 'None' placeholder so months are 1-based)
HIJRI_MONTH = ['None', 'Muharram', 'Safar', 'Rabiul awal', 'Rabiul akhir', 'Jumadil awal', 'Jumadil akhir', 'Rajab', "Sya'ban", 'Ramadhan', 'Syawal', 'Dzulkaidah', 'Dzulhijjah']
#MY STATES -- Indonesian provinces, alphabetically ordered
STATES = [
    'Aceh',
    'Bali',
    'Bangka Belitung',
    'Banten',
    'Bengkulu',
    'DKI Jakarta',
    'Gorontalo',
    'Jambi',
    'Jawa Barat',
    'Jawa Tengah',
    'Jawa Timur',
    'Kalimantan Barat',
    'Kalimantan Selatan',
    'Kalimantan Tengah',
    'Kalimantan Timur',
    'Kalimantan Utara',
    'Kepulauan Riau',
    'Lampung',
    'Maluku',
    'Maluku Utara',
    'NTB',
    'NTT',
    'Papua',
    'Papua Barat',
    'Riau',
    'Sulawesi Barat',
    'Sulawesi Selatan',
    'Sulawesi Tengah',
    'Sulawesi Tenggara',
    'Sulawesi Utara',
    'Sumatera Barat',
    'Sumatera Selatan',
    'Sumatera Utara',
    'Yogyakarta'
]
|
[
"bambang.untoro@gmail.com"
] |
bambang.untoro@gmail.com
|
2f7a90a5d8520cfe8d34d381c034931fca9f9969
|
06f9c6a9375557ace9450184808e3a6515f5527a
|
/repi/admin.py
|
d47af96b5e02d07275e9ac0da03bea13e0036dd0
|
[] |
no_license
|
nineblu/renzhijiqiren
|
1fe410d2f523959c2f14a4295c53e1e05ac14e60
|
cb3640c6de713f288278a57200c71d4feb5bfd6e
|
refs/heads/master
| 2020-03-13T13:32:14.801056
| 2018-04-26T10:44:11
| 2018-04-26T10:44:11
| 131,140,736
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 117
|
py
|
from django.contrib import admin
from . import models

# Register your models here.
# Expose the `infos` model in the Django admin so its rows can be
# created and edited through the default admin UI.
admin.site.register(models.infos)
|
[
"24690674+nineblu@users.noreply.github.com"
] |
24690674+nineblu@users.noreply.github.com
|
5001fbf3e0ef8468a40fde339d84c9c6650ebd8c
|
c9e38947d7282ddde28de5ba83a8b465d9420e39
|
/tapti_app/migrations/0012_auto_20190917_2156.py
|
26d85ca2f007e86a3c2321a82d97334aef6e3443
|
[] |
no_license
|
ronbaddi004/tapti
|
79409370c8fe39f4b74917fd98229cb75ddd3d83
|
4f1821e278482ccb3873f7ef9a479ef1d8d96c8a
|
refs/heads/master
| 2020-07-23T21:40:50.934672
| 2019-09-17T17:48:00
| 2019-09-17T17:48:00
| 207,713,855
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 581
|
py
|
# Generated by Django 2.2.4 on 2019-09-17 16:26
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration for `tapti_app`.

    Alters two fields on the `Contact` model:
    - `created_at` becomes an auto-populated creation timestamp.
    - `message` becomes an optional CharField capped at 1000 characters.
    """

    dependencies = [
        ('tapti_app', '0011_auto_20190917_2125'),
    ]

    operations = [
        migrations.AlterField(
            model_name='contact',
            name='created_at',
            # auto_now_add: set once at row creation and never updated.
            field=models.DateTimeField(auto_now_add=True),
        ),
        migrations.AlterField(
            model_name='contact',
            name='message',
            field=models.CharField(blank=True, max_length=1000, null=True),
        ),
    ]
|
[
"ronbaddi004@gmail.com"
] |
ronbaddi004@gmail.com
|
dff8672fc730707a8fb570bb2ccef8069937803c
|
b910f60b2d6a16f5bdc2156f7af78e9cc32c95f4
|
/crm/migrations/0011_auto_20170823_1554.py
|
85f98e847a1afdb9b60db8af4a82ebd0243cd17f
|
[] |
no_license
|
Ethan30k/mycrm
|
6fe9e796ed4d854ebc5c59cc44798c7f103d832a
|
c1e7495de6b8a5fd59897de977a876e13f340373
|
refs/heads/master
| 2021-01-01T06:53:22.002283
| 2017-08-29T06:06:18
| 2017-08-29T06:06:18
| 97,542,559
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 730
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-23 07:54
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: sets custom admin permissions
    (view app index, list tables, view/change/add rows) on `userprofile`
    via the model's Meta options.
    """

    dependencies = [
        ('crm', '0010_auto_20170822_1758'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='userprofile',
            # Permission labels are user-facing (Chinese) strings and must
            # stay byte-identical to match the recorded migration state.
            options={'permissions': (('crm_app_index', '可以查看freeadmin所有的app首页'), ('crm_table_list', '可以查看freeadmin表里的所有数据'), ('crm_table_list_view', '可以查看freeadmin表里每条数据的修改页'), ('crm_table_list_change', '可以修改freeadmin表里每条数据'), ('crm_table_list_add', '可以添加数据'))},
        ),
    ]
|
[
"448461456@qq.com"
] |
448461456@qq.com
|
8cef5c46d6db9d942d418b2016ce11f8753f4ad8
|
0b53fc8f70a54e8c856052827140ab4a57c70e09
|
/HW02/Q06/wordFrequency.py
|
f421d98f6b03a17a070a93e983ce1858db3e4e03
|
[] |
no_license
|
ecasiano/PrinciplesOfComplexSystems
|
f3d307e4a5051931deb22931d6aab0a2f1739877
|
05b7a43dc48e96433abac1fc5722903c39f77d95
|
refs/heads/master
| 2020-03-28T03:46:27.761452
| 2018-10-29T22:09:13
| 2018-10-29T22:09:13
| 147,670,163
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,816
|
py
|
#Emanuel Casiano-Diaz - September 12, 2018.
#INSERT PROGRAM DESCRIPTION
import numpy as np
import matplotlib.pyplot as plt
from math import log
from scipy.stats import linregress
import colors
#with plt.style.context('../../IOP_large.mplstyle'):
#Some nice pastel colors: base hex value for each palette family.
red = ["#e85c47"]
blue = ["#4173b3"]
orange = ["#ff8c00"]
green = ["#7dcca4"]
violet = ["#b19cd9"]
# Alpha levels used to derive progressively lighter shades of each base color
# (appended after the base, so e.g. red[1..3] are faded variants of red[0]).
alpha = [0.8,0.5,0.1]
for i,c in enumerate(alpha):
    # NOTE(review): loop variable `c` is unused; alpha[i] is looked up instead.
    red.append(colors.get_alpha_hex(red[0],alpha[i]))
    orange.append(colors.get_alpha_hex(orange[0],alpha[i]))
    green.append(colors.get_alpha_hex(green[0],alpha[i]))
    blue.append(colors.get_alpha_hex(blue[0],alpha[i]))
    violet.append(colors.get_alpha_hex(violet[0],alpha[i]))
"--------------------------------------------------------------------"
#Define functions
def findPowerLaw(x, y, rSquared=False):
    """Fit a power law y = A * x**p by linear regression in log-log space.

    A power law is a straight line in log-log coordinates:
    log10(y) = p * log10(x) + log10(A), so we fit that line.

    :param x: array of positive abscissa values
    :param y: array of positive ordinate values
    :param rSquared: if True, also return the coefficient of determination
    :return: (A, p) -- prefactor and exponent -- or (A, p, R**2)
    """
    # (dead np.polyfit variant removed; linregress also yields R.)
    p, logA, r_value, p_value, std_err = linregress(np.log10(x), np.log10(y))
    A = 10 ** logA
    if rSquared:
        return A, p, r_value ** 2
    return A, p
def findNearest(array, val, above=None, below=None):
    """Return the largest index i such that array[i] <= val.

    NOTE(review): `above` and `below` are accepted but never used; they are
    kept so existing call sites remain valid -- confirm before removing.
    Raises ValueError (from np.max on an empty selection) when no element
    of `array` is <= val.
    """
    # (dead commented-out sort removed; the caller's ordering is preserved.)
    indicesUpToVal = np.where(array <= val)
    return np.max(indicesUpToVal)
"--------------------------------------------------------------------"
#Load data
data = np.loadtxt("vocab_cs_mod.txt")
dataSlice = np.loadtxt("vocab_cs_modSliced.txt")
k = data[:,0] #How many times a set of words appears
Nk = data[:,1] #How many words are in the set that appears k times
kSlice = dataSlice[:,0] # Same but in the region where eyeballing
NkSlice = dataSlice[:,1] # says Nk follows a power law in k.
#Eye-balled max
eyeBalledMax = 1E+04
#Find the power law exponent and pre-factor in the region
#where eye-ballingology says the data follows a power-law.
A,p,r2 = findPowerLaw(kSlice,NkSlice,rSquared=True)
#Use the obtained pre-factor and exponent to perform a fit in the eye-balled region
kFit = np.linspace(kSlice[-1],kSlice[0],1000)
NkFit = A*kFit**p
#Mean and standard deviation of the sample
NkMean = np.mean(Nk)
NkVar = np.var(Nk)
NkStDev = np.std(Nk)
print("---Nk---")
print("k Bounds: low: %.2E high: %.2E"%(k[-1],k[0]))
print("Power Law Exponent: %.4f"%(p))
print("Mean: %.4f"%(NkMean))
print("Variance: %.4f"%(NkVar))
print("Standard Deviation: %.4f"%(NkStDev))
"--------------------------------------------------------------------"
#Plot
fig, ax1 = plt.subplots()
ax1.plot(k,Nk, 'o',color=violet[1],mfc='None',label=r'($\frac{1}{3},\frac{1}{3},\frac{1}{3}$)',linewidth=0.5,markersize=1.25,rasterized=True)
ax1.set_xlabel(r"$N_k$")
ax1.set_ylabel(r"$k$")
ax1.tick_params(axis='both', which='both', left='on', right='off', top='off', bottom='on', labelleft='on', labelbottom='on', direction = 'in')
plt.savefig("wordFrequency.pdf")
fig, ax2 = plt.subplots()
ax2.plot(k,Nk, 'o',color=violet[1],mfc='None',label=r'($\frac{1}{3},\frac{1}{3},\frac{1}{3}$)',linewidth=0.5,markersize=1.25,rasterized=True)
ax2.plot(kFit,NkFit, '-',color='black',mfc='None',label=r'($\frac{1}{3},\frac{1}{3},\frac{1}{3}$)',linewidth=0.75,rasterized=True)
ax2.set_ylabel(r"$N_k$")
ax2.set_xlabel(r"$k$")
ax2.loglog()
plt.axvspan(k[np.argmax(Nk)],eyeBalledMax, facecolor=orange[1], alpha=0.25)
ax2.tick_params(axis='both', which='both', left='on', right='off', top='off', bottom='on', labelleft='on', labelbottom='on', direction = 'in')
ax2.text(2E+05,1E+03,r"$(N_K)_{10^2 \leq k \leq 10^4} \approx (3.67x10^8) k^{-1.70}$",fontsize=12)
ax2.text(2E+05,3E+02,r"$R^2 = %.5f$"%(r2),fontsize=12)
plt.savefig("wordFrequencyLog.pdf")
|
[
"Emanuel.Casiano-Diaz@uvm.edu"
] |
Emanuel.Casiano-Diaz@uvm.edu
|
194ab3b60f32137818023e1575441ad58c6fcffe
|
7681040d0def5247880168361671e6cde0243b4a
|
/1-Lesson-Plans/Unit03-Python/4-Review/Activities/04-Stu_Conditionally_Crazy/Unsolved/drinking_age.py
|
72b3004f7ce56fbb41cefa26db9700ffd30fab55
|
[] |
no_license
|
aditp928/cyber-secruitiy-
|
8c61d7d73ce44d31a077f594a9ee4d420684348e
|
e60d06487d1477fd690a13a8b4c1e19648c78d28
|
refs/heads/master
| 2020-03-28T01:43:47.493344
| 2018-09-05T14:20:49
| 2018-09-05T14:20:49
| 147,526,752
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 218
|
py
|
# TODO: Set a variable, called `drinking_age`, to 21
# TODO: Prompt the user for their age
# TODO: Check if the user is 21 or older
# TODO: If so, print: "Cheers!"
# TODO: If not, print: "Your fake looks really fake."
|
[
"MyFolder@Adits-MacBook-Air.local"
] |
MyFolder@Adits-MacBook-Air.local
|
ce8d487e56f07eafc10fa20d781341c61806bd82
|
ef6ea60047fbdd8e6155770dcd47dab7ab05055a
|
/talks/migrations/0002_auto_20150708_0959.py
|
cd9c0e6eb30ea0adb558b2f44836acbb958cc52c
|
[] |
no_license
|
nupuragrahari/Web-app-TALKS
|
1c88320b105f766b958ad0f2c4e56650d2cce6ee
|
2ea03087051b57713eff7adcaf656adce8f3b746
|
refs/heads/master
| 2021-01-10T05:48:18.217095
| 2015-10-24T13:19:06
| 2015-10-24T13:19:06
| 44,865,127
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,176
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated migration: creates the `Talk` model.

    A talk belongs to a `TalkList` (FK with related_name 'talks'), is
    ordered by (when, room), and is unique per (talk_list, name).
    """

    dependencies = [
        ('talks', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Talk',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=255)),
                # Slug may be blank -- presumably derived from `name` on save;
                # confirm in the model's save() method.
                ('slug', models.SlugField(max_length=255, blank=True)),
                ('when', models.DateTimeField()),
                ('room', models.CharField(max_length=10, choices=[(b'517D', b'517D'), (b'517C', b'517C'), (b'517AB', b'517AB'), (b'520', b'520'), (b'710A', b'710A')])),
                ('host', models.CharField(max_length=255)),
                ('talk_list', models.ForeignKey(related_name='talks', to='talks.TalkList')),
            ],
            options={
                'ordering': ('when', 'room'),
            },
        ),
        migrations.AlterUniqueTogether(
            name='talk',
            unique_together=set([('talk_list', 'name')]),
        ),
    ]
|
[
"nupur.agrahari93@gmail.com"
] |
nupur.agrahari93@gmail.com
|
5cc89d8c3d1847674232c5db8bd22499cbc73a16
|
c8e78af14250dccbb2f89214fb2bd00d3812bcdb
|
/DisasterResponse_code/HerokuTwillo/venv/lib/python2.7/site-packages/mercurial/dirstate.py
|
2b701ab29e8730139d93ced939117135e217c637
|
[
"MIT"
] |
permissive
|
weiningb/DisasterResponse
|
5d6d5e915e95da9fbab41ed051acca31196bac95
|
8068c0a5e2efb81b34e799434b52cd05cc90b0f1
|
refs/heads/master
| 2021-01-21T05:40:12.009792
| 2015-11-18T18:56:37
| 2015-11-18T18:56:37
| 46,703,733
| 1
| 0
| null | 2015-11-23T07:29:07
| 2015-11-23T07:29:07
| null |
UTF-8
|
Python
| false
| false
| 32,961
|
py
|
# dirstate.py - working directory tracking for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from node import nullid
from i18n import _
import scmutil, util, ignore, osutil, parsers, encoding, pathutil
import os, stat, errno
# Module-level aliases for the caching decorators used by the classes below.
propertycache = util.propertycache
filecache = scmutil.filecache
# 0x7fffffff == 2**31 - 1; presumably used to clamp stat values into a
# signed 31-bit range before storage -- confirm at the call sites.
_rangemask = 0x7fffffff
dirstatetuple = parsers.dirstatetuple
class repocache(filecache):
    """filecache for files in .hg/"""
    def join(self, obj, fname):
        # Resolve fname via the dirstate's opener, which is rooted at .hg/.
        return obj._opener.join(fname)
class rootcache(filecache):
    """filecache for files in the repository root"""
    def join(self, obj, fname):
        # Resolve fname relative to the working-directory root via _join.
        return obj._join(fname)
class dirstate(object):
def __init__(self, opener, ui, root, validate):
'''Create a new dirstate object.
opener is an open()-like callable that can be used to open the
dirstate file; root is the root of the directory tracked by
the dirstate.
'''
self._opener = opener
self._validate = validate
self._root = root
# ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
# UNC path pointing to root share (issue4557)
if root.endswith(os.sep):
self._rootdir = root
else:
self._rootdir = root + os.sep
self._dirty = False
self._dirtypl = False
self._lastnormaltime = 0
self._ui = ui
self._filecache = {}
self._parentwriters = 0
def beginparentchange(self):
'''Marks the beginning of a set of changes that involve changing
the dirstate parents. If there is an exception during this time,
the dirstate will not be written when the wlock is released. This
prevents writing an incoherent dirstate where the parent doesn't
match the contents.
'''
self._parentwriters += 1
def endparentchange(self):
'''Marks the end of a set of changes that involve changing the
dirstate parents. Once all parent changes have been marked done,
the wlock will be free to write the dirstate on release.
'''
if self._parentwriters > 0:
self._parentwriters -= 1
def pendingparentchange(self):
'''Returns true if the dirstate is in the middle of a set of changes
that modify the dirstate parent.
'''
return self._parentwriters > 0
@propertycache
def _map(self):
'''Return the dirstate contents as a map from filename to
(state, mode, size, time).'''
self._read()
return self._map
@propertycache
def _copymap(self):
self._read()
return self._copymap
@propertycache
def _foldmap(self):
f = {}
normcase = util.normcase
for name, s in self._map.iteritems():
if s[0] != 'r':
f[normcase(name)] = name
for name in self._dirs:
f[normcase(name)] = name
f['.'] = '.' # prevents useless util.fspath() invocation
return f
@repocache('branch')
def _branch(self):
try:
return self._opener.read("branch").strip() or "default"
except IOError, inst:
if inst.errno != errno.ENOENT:
raise
return "default"
@propertycache
def _pl(self):
try:
fp = self._opener("dirstate")
st = fp.read(40)
fp.close()
l = len(st)
if l == 40:
return st[:20], st[20:40]
elif l > 0 and l < 40:
raise util.Abort(_('working directory state appears damaged!'))
except IOError, err:
if err.errno != errno.ENOENT:
raise
return [nullid, nullid]
@propertycache
def _dirs(self):
return scmutil.dirs(self._map, 'r')
def dirs(self):
return self._dirs
@rootcache('.hgignore')
def _ignore(self):
files = [self._join('.hgignore')]
for name, path in self._ui.configitems("ui"):
if name == 'ignore' or name.startswith('ignore.'):
# we need to use os.path.join here rather than self._join
# because path is arbitrary and user-specified
files.append(os.path.join(self._rootdir, util.expandpath(path)))
return ignore.ignore(self._root, files, self._ui.warn)
@propertycache
def _slash(self):
return self._ui.configbool('ui', 'slash') and os.sep != '/'
@propertycache
def _checklink(self):
return util.checklink(self._root)
@propertycache
def _checkexec(self):
return util.checkexec(self._root)
@propertycache
def _checkcase(self):
return not util.checkcase(self._join('.hg'))
def _join(self, f):
# much faster than os.path.join()
# it's safe because f is always a relative path
return self._rootdir + f
def flagfunc(self, buildfallback):
if self._checklink and self._checkexec:
def f(x):
try:
st = os.lstat(self._join(x))
if util.statislink(st):
return 'l'
if util.statisexec(st):
return 'x'
except OSError:
pass
return ''
return f
fallback = buildfallback()
if self._checklink:
def f(x):
if os.path.islink(self._join(x)):
return 'l'
if 'x' in fallback(x):
return 'x'
return ''
return f
if self._checkexec:
def f(x):
if 'l' in fallback(x):
return 'l'
if util.isexec(self._join(x)):
return 'x'
return ''
return f
else:
return fallback
@propertycache
def _cwd(self):
return os.getcwd()
def getcwd(self):
cwd = self._cwd
if cwd == self._root:
return ''
# self._root ends with a path separator if self._root is '/' or 'C:\'
rootsep = self._root
if not util.endswithsep(rootsep):
rootsep += os.sep
if cwd.startswith(rootsep):
return cwd[len(rootsep):]
else:
# we're outside the repo. return an absolute path.
return cwd
def pathto(self, f, cwd=None):
if cwd is None:
cwd = self.getcwd()
path = util.pathto(self._root, cwd, f)
if self._slash:
return util.pconvert(path)
return path
def __getitem__(self, key):
'''Return the current state of key (a filename) in the dirstate.
States are:
n normal
m needs merging
r marked for removal
a marked for addition
? not tracked
'''
return self._map.get(key, ("?",))[0]
def __contains__(self, key):
return key in self._map
def __iter__(self):
for x in sorted(self._map):
yield x
def iteritems(self):
return self._map.iteritems()
def parents(self):
return [self._validate(p) for p in self._pl]
def p1(self):
return self._validate(self._pl[0])
def p2(self):
return self._validate(self._pl[1])
def branch(self):
return encoding.tolocal(self._branch)
def setparents(self, p1, p2=nullid):
"""Set dirstate parents to p1 and p2.
When moving from two parents to one, 'm' merged entries a
adjusted to normal and previous copy records discarded and
returned by the call.
See localrepo.setparents()
"""
if self._parentwriters == 0:
raise ValueError("cannot set dirstate parent without "
"calling dirstate.beginparentchange")
self._dirty = self._dirtypl = True
oldp2 = self._pl[1]
self._pl = p1, p2
copies = {}
if oldp2 != nullid and p2 == nullid:
for f, s in self._map.iteritems():
# Discard 'm' markers when moving away from a merge state
if s[0] == 'm':
if f in self._copymap:
copies[f] = self._copymap[f]
self.normallookup(f)
# Also fix up otherparent markers
elif s[0] == 'n' and s[2] == -2:
if f in self._copymap:
copies[f] = self._copymap[f]
self.add(f)
return copies
def setbranch(self, branch):
self._branch = encoding.fromlocal(branch)
f = self._opener('branch', 'w', atomictemp=True)
try:
f.write(self._branch + '\n')
f.close()
# make sure filecache has the correct stat info for _branch after
# replacing the underlying file
ce = self._filecache['_branch']
if ce:
ce.refresh()
except: # re-raises
f.discard()
raise
def _read(self):
    # Load the on-disk dirstate file into self._map / self._copymap.
    self._map = {}
    self._copymap = {}
    try:
        st = self._opener.read("dirstate")
    except IOError, err:
        # a missing dirstate file simply means an empty dirstate
        if err.errno != errno.ENOENT:
            raise
        return
    if not st:
        return

    # Python's garbage collector triggers a GC each time a certain number
    # of container objects (the number being defined by
    # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
    # for each file in the dirstate. The C version then immediately marks
    # them as not to be tracked by the collector. However, this has no
    # effect on when GCs are triggered, only on what objects the GC looks
    # into. This means that O(number of files) GCs are unavoidable.
    # Depending on when in the process's lifetime the dirstate is parsed,
    # this can get very expensive. As a workaround, disable GC while
    # parsing the dirstate.
    #
    # (we cannot decorate the function directly since it is in a C module)
    parse_dirstate = util.nogc(parsers.parse_dirstate)
    p = parse_dirstate(self._map, self._copymap, st)
    if not self._dirtypl:
        # only adopt on-disk parents when they were not changed in memory
        self._pl = p
def invalidate(self):
    """Drop all cached state so it is re-read from disk on next access."""
    cached = ("_map", "_copymap", "_foldmap", "_branch", "_pl", "_dirs",
              "_ignore")
    for name in cached:
        if name in self.__dict__:
            delattr(self, name)
    self._lastnormaltime = 0
    self._dirty = False
    self._parentwriters = 0
def copy(self, source, dest):
    """Mark dest as a copy of source. Unmark dest if source is None."""
    if source == dest:
        # self-copy is a no-op and must not dirty the dirstate
        return
    self._dirty = True
    if source is None:
        if dest in self._copymap:
            del self._copymap[dest]
    else:
        self._copymap[dest] = source
def copied(self, file):
    """Return the recorded copy source of *file*, or None if not copied."""
    return self._copymap.get(file)
def copies(self):
    # expose the raw copy map {dest: source}
    return self._copymap
def _droppath(self, f):
    # keep the cached _dirs structure in sync when a tracked path leaves
    if "_dirs" in self.__dict__ and self[f] not in "?r":
        self._dirs.delpath(f)
def _addpath(self, f, state, mode, size, mtime):
    # Insert or refresh entry *f* with the given state, validating the
    # name against existing files/directories when it becomes tracked.
    oldstate = self[f]
    if state == 'a' or oldstate == 'r':
        scmutil.checkfilename(f)
        if f in self._dirs:
            raise util.Abort(_('directory %r already in dirstate') % f)
        # shadows
        for d in scmutil.finddirs(f):
            if d in self._dirs:
                break
            if d in self._map and self[d] != 'r':
                raise util.Abort(
                    _('file %r in dirstate clashes with %r') % (d, f))
    if oldstate in "?r" and "_dirs" in self.__dict__:
        # path becomes tracked: register it in the directory cache
        self._dirs.addpath(f)
    self._dirty = True
    self._map[f] = dirstatetuple(state, mode, size, mtime)
def normal(self, f):
    '''Mark a file normal and clean.'''
    s = os.lstat(self._join(f))
    mtime = int(s.st_mtime)
    # size and mtime are masked to fit the on-disk dirstate field width
    self._addpath(f, 'n', s.st_mode,
                  s.st_size & _rangemask, mtime & _rangemask)
    if f in self._copymap:
        del self._copymap[f]
    if mtime > self._lastnormaltime:
        # Remember the most recent modification timeslot for status(),
        # to make sure we won't miss future size-preserving file content
        # modifications that happen within the same timeslot.
        self._lastnormaltime = mtime
def normallookup(self, f):
    '''Mark a file normal, but possibly dirty.'''
    if self._pl[1] != nullid and f in self._map:
        # if there is a merge going on and the file was either
        # in state 'm' (-1) or coming from other parent (-2) before
        # being removed, restore that state.
        entry = self._map[f]
        if entry[0] == 'r' and entry[2] in (-1, -2):
            source = self._copymap.get(f)
            if entry[2] == -1:
                self.merge(f)
            elif entry[2] == -2:
                self.otherparent(f)
            if source:
                self.copy(source, f)
            return
        if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
            return
    # size -1 / mtime -1 force a content comparison on the next status()
    self._addpath(f, 'n', 0, -1, -1)
    if f in self._copymap:
        del self._copymap[f]
def otherparent(self, f):
    '''Mark as coming from the other parent, always dirty.'''
    if self._pl[1] == nullid:
        raise util.Abort(_("setting %r to other parent "
                           "only allowed in merges") % f)
    if f in self and self[f] == 'n':
        # merge-like
        self._addpath(f, 'm', 0, -2, -1)
    else:
        # add-like
        self._addpath(f, 'n', 0, -2, -1)
    if f in self._copymap:
        del self._copymap[f]
def add(self, f):
    '''Mark a file added.'''
    self._addpath(f, 'a', 0, -1, -1)
    # an added file cannot carry a copy record
    self._copymap.pop(f, None)
def remove(self, f):
    '''Mark a file removed.'''
    self._dirty = True
    self._droppath(f)
    size = 0
    if self._pl[1] != nullid and f in self._map:
        # backup the previous state in the size field so that
        # normallookup can restore it later
        entry = self._map[f]
        if entry[0] == 'm': # merge
            size = -1
        elif entry[0] == 'n' and entry[2] == -2: # other parent
            size = -2
    self._map[f] = dirstatetuple('r', 0, size, 0)
    if size == 0 and f in self._copymap:
        del self._copymap[f]
def merge(self, f):
    '''Mark a file merged.'''
    # outside a merge there is nothing to merge with: fall back to lookup
    mark = self.normallookup if self._pl[1] == nullid else self.otherparent
    return mark(f)
def drop(self, f):
    '''Drop a file from the dirstate'''
    if f not in self._map:
        # unknown path: nothing to do, do not dirty the dirstate
        return
    self._dirty = True
    self._droppath(f)
    del self._map[f]
def _normalize(self, path, isknown, ignoremissing=False, exists=None):
    # Resolve the canonical on-disk case of *path* on a case-folding
    # filesystem, caching resolved names in self._foldmap.
    normed = util.normcase(path)
    folded = self._foldmap.get(normed, None)
    if folded is None:
        if isknown:
            # name came from walking the disk: trust its case as-is
            folded = path
        else:
            if exists is None:
                exists = os.path.lexists(os.path.join(self._root, path))
            if not exists:
                # Maybe a path component exists
                if not ignoremissing and '/' in path:
                    d, f = path.rsplit('/', 1)
                    d = self._normalize(d, isknown, ignoremissing, None)
                    folded = d + "/" + f
                else:
                    # No path components, preserve original case
                    folded = path
            else:
                # recursively normalize leading directory components
                # against dirstate
                if '/' in normed:
                    d, f = normed.rsplit('/', 1)
                    d = self._normalize(d, isknown, ignoremissing, True)
                    r = self._root + "/" + d
                    folded = d + "/" + util.fspath(f, r)
                else:
                    folded = util.fspath(normed, self._root)
                # only cache names that were verified against the disk
                self._foldmap[normed] = folded

    return folded
def normalize(self, path, isknown=False, ignoremissing=False):
    '''
    normalize the case of a pathname when on a casefolding filesystem

    isknown specifies whether the filename came from walking the
    disk, to avoid extra filesystem access.

    If ignoremissing is True, missing paths are returned
    unchanged. Otherwise, we try harder to normalize possibly
    existing path components.

    The normalized case is determined based on the following precedence:

    - version of name already stored in the dirstate
    - version of name stored on disk
    - version provided via command arguments
    '''
    # on case-sensitive filesystems no folding is necessary
    if self._checkcase:
        return self._normalize(path, isknown, ignoremissing)
    return path
def clear(self):
    """Reset to an empty dirstate with null parents."""
    self._map = {}
    self._copymap = {}
    # drop the cached directory structure, if it was ever built
    if "_dirs" in self.__dict__:
        delattr(self, "_dirs")
    self._pl = [nullid, nullid]
    self._lastnormaltime = 0
    self._dirty = True
def rebuild(self, parent, allfiles, changedfiles=None):
    # Rebuild the dirstate as of manifest *allfiles* with single parent
    # *parent*, preserving existing entries for files not in *changedfiles*.
    changedfiles = changedfiles or allfiles
    oldmap = self._map
    self.clear()
    for f in allfiles:
        if f not in changedfiles:
            self._map[f] = oldmap[f]
        else:
            # size -1 forces a content check on the next status()
            if 'x' in allfiles.flags(f):
                self._map[f] = dirstatetuple('n', 0777, -1, 0)
            else:
                self._map[f] = dirstatetuple('n', 0666, -1, 0)
    self._pl = (parent, nullid)
    self._dirty = True
def write(self):
    # Serialize the in-memory dirstate back to the 'dirstate' file,
    # but only when something actually changed.
    if not self._dirty:
        return

    # enough 'delaywrite' prevents 'pack_dirstate' from dropping
    # timestamp of each entries in dirstate, because of 'now > mtime'
    delaywrite = self._ui.configint('debug', 'dirstate.delaywrite', 0)
    if delaywrite > 0:
        import time # to avoid useless import
        time.sleep(delaywrite)

    st = self._opener("dirstate", "w", atomictemp=True)
    # use the modification time of the newly created temporary file as the
    # filesystem's notion of 'now'
    now = util.fstat(st).st_mtime
    st.write(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
    st.close()
    self._lastnormaltime = 0
    self._dirty = self._dirtypl = False
def _dirignore(self, f):
    """True if *f* or any of its parent directories is ignored."""
    if f == '.':
        return False
    if self._ignore(f):
        return True
    return any(self._ignore(p) for p in scmutil.finddirs(f))
def _walkexplicit(self, match, subrepos):
    '''Get stat data about the files explicitly specified by match.

    Return a triple (results, dirsfound, dirsnotfound).
    - results is a mapping from filename to stat result. It also contains
      listings mapping subrepos and .hg to None.
    - dirsfound is a list of files found to be directories.
    - dirsnotfound is a list of files that the dirstate thinks are
      directories and that were not found.'''

    def badtype(mode):
        # human-readable description of an unsupported file type
        kind = _('unknown')
        if stat.S_ISCHR(mode):
            kind = _('character device')
        elif stat.S_ISBLK(mode):
            kind = _('block device')
        elif stat.S_ISFIFO(mode):
            kind = _('fifo')
        elif stat.S_ISSOCK(mode):
            kind = _('socket')
        elif stat.S_ISDIR(mode):
            kind = _('directory')
        return _('unsupported file type (type is %s)') % kind

    matchedir = match.explicitdir
    badfn = match.bad
    dmap = self._map
    # hoist hot attribute lookups into locals for the loops below
    normpath = util.normpath
    lstat = os.lstat
    getkind = stat.S_IFMT
    dirkind = stat.S_IFDIR
    regkind = stat.S_IFREG
    lnkkind = stat.S_IFLNK
    join = self._join
    dirsfound = []
    foundadd = dirsfound.append
    dirsnotfound = []
    notfoundadd = dirsnotfound.append

    if match.matchfn != match.exact and self._checkcase:
        normalize = self._normalize
    else:
        normalize = None

    files = sorted(match.files())
    subrepos.sort()
    # drop explicit files that live inside one of the subrepos
    i, j = 0, 0
    while i < len(files) and j < len(subrepos):
        subpath = subrepos[j] + "/"
        if files[i] < subpath:
            i += 1
            continue
        while i < len(files) and files[i].startswith(subpath):
            del files[i]
        j += 1

    if not files or '.' in files:
        files = ['']
    results = dict.fromkeys(subrepos)
    results['.hg'] = None

    alldirs = None
    for ff in files:
        if normalize:
            nf = normalize(normpath(ff), False, True)
        else:
            nf = normpath(ff)
        if nf in results:
            continue

        try:
            st = lstat(join(nf))
            kind = getkind(st.st_mode)
            if kind == dirkind:
                if nf in dmap:
                    # file replaced by dir on disk but still in dirstate
                    results[nf] = None
                if matchedir:
                    matchedir(nf)
                foundadd((nf, ff))
            elif kind == regkind or kind == lnkkind:
                results[nf] = st
            else:
                badfn(ff, badtype(kind))
                if nf in dmap:
                    results[nf] = None
        except OSError, inst: # nf not found on disk - it is dirstate only
            if nf in dmap: # does it exactly match a missing file?
                results[nf] = None
            else: # does it match a missing directory?
                if alldirs is None:
                    alldirs = scmutil.dirs(dmap)
                if nf in alldirs:
                    if matchedir:
                        matchedir(nf)
                    notfoundadd(nf)
                else:
                    badfn(ff, inst.strerror)

    return results, dirsfound, dirsnotfound
def walk(self, match, subrepos, unknown, ignored, full=True):
    '''
    Walk recursively through the directory tree, finding all files
    matched by match.

    If full is False, maybe skip some known-clean files.

    Return a dict mapping filename to stat-like object (either
    mercurial.osutil.stat instance or return value of os.stat()).
    '''
    # full is a flag that extensions that hook into walk can use -- this
    # implementation doesn't use it at all. This satisfies the contract
    # because we only guarantee a "maybe".

    # pick ignore predicates based on which file classes are wanted
    if ignored:
        ignore = util.never
        dirignore = util.never
    elif unknown:
        ignore = self._ignore
        dirignore = self._dirignore
    else:
        # if not unknown and not ignored, drop dir recursion and step 2
        ignore = util.always
        dirignore = util.always

    matchfn = match.matchfn
    matchalways = match.always()
    matchtdir = match.traversedir
    dmap = self._map
    # hoist hot lookups into locals
    listdir = osutil.listdir
    lstat = os.lstat
    dirkind = stat.S_IFDIR
    regkind = stat.S_IFREG
    lnkkind = stat.S_IFLNK
    join = self._join

    exact = skipstep3 = False
    if matchfn == match.exact: # match.exact
        exact = True
        dirignore = util.always # skip step 2
    elif match.files() and not match.anypats(): # match.match, no patterns
        skipstep3 = True

    if not exact and self._checkcase:
        normalize = self._normalize
        skipstep3 = False
    else:
        normalize = None

    # step 1: find all explicit files
    results, work, dirsnotfound = self._walkexplicit(match, subrepos)

    skipstep3 = skipstep3 and not (work or dirsnotfound)
    work = [d for d in work if not dirignore(d[0])]
    wadd = work.append

    # step 2: visit subdirectories
    while work:
        nd, d = work.pop()
        skip = None
        if nd == '.':
            nd = ''
            d = ''
        else:
            skip = '.hg'
        try:
            entries = listdir(join(nd), stat=True, skip=skip)
        except OSError, inst:
            # unreadable/missing dirs are reported, everything else raises
            if inst.errno in (errno.EACCES, errno.ENOENT):
                match.bad(self.pathto(nd), inst.strerror)
                continue
            raise
        for f, kind, st in entries:
            if normalize:
                nf = normalize(nd and (nd + "/" + f) or f, True, True)
                f = d and (d + "/" + f) or f
            else:
                nf = nd and (nd + "/" + f) or f
                f = nf
            if nf not in results:
                if kind == dirkind:
                    if not ignore(nf):
                        if matchtdir:
                            matchtdir(nf)
                        wadd((nf, f))
                    if nf in dmap and (matchalways or matchfn(nf)):
                        results[nf] = None
                elif kind == regkind or kind == lnkkind:
                    if nf in dmap:
                        if matchalways or matchfn(nf):
                            results[nf] = st
                    elif (matchalways or matchfn(f)) and not ignore(nf):
                        results[nf] = st
                elif nf in dmap and (matchalways or matchfn(nf)):
                    results[nf] = None

    for s in subrepos:
        del results[s]
    del results['.hg']

    # step 3: visit remaining files from dmap
    if not skipstep3 and not exact:
        # If a dmap file is not in results yet, it was either
        # a) not matching matchfn b) ignored, c) missing, or d) under a
        # symlink directory.
        if not results and matchalways:
            visit = dmap.keys()
        else:
            visit = [f for f in dmap if f not in results and matchfn(f)]
        visit.sort()

        if unknown:
            # unknown == True means we walked all dirs under the roots
            # that wasn't ignored, and everything that matched was stat'ed
            # and is already in results.
            # The rest must thus be ignored or under a symlink.
            audit_path = pathutil.pathauditor(self._root)

            for nf in iter(visit):
                # Report ignored items in the dmap as long as they are not
                # under a symlink directory.
                if audit_path.check(nf):
                    try:
                        results[nf] = lstat(join(nf))
                        # file was just ignored, no links, and exists
                    except OSError:
                        # file doesn't exist
                        results[nf] = None
                else:
                    # It's either missing or under a symlink directory
                    # which we in this case report as missing
                    results[nf] = None
        else:
            # We may not have walked the full directory tree above,
            # so stat and check everything we missed.
            nf = iter(visit).next
            for st in util.statfiles([join(i) for i in visit]):
                results[nf()] = st
    return results
def status(self, match, subrepos, ignored, clean, unknown):
    '''Determine the status of the working copy relative to the
    dirstate and return a pair of (unsure, status), where status is of type
    scmutil.status and:

      unsure:
        files that might have been modified since the dirstate was
        written, but need to be read to be sure (size is the same
        but mtime differs)

      status.modified:
        files that have definitely been modified since the dirstate
        was written (different size or mode)

      status.clean:
        files that have definitely not been modified since the
        dirstate was written
    '''
    listignored, listclean, listunknown = ignored, clean, unknown
    lookup, modified, added, unknown, ignored = [], [], [], [], []
    removed, deleted, clean = [], [], []

    dmap = self._map
    # bind list.append methods to locals: cheaper in the hot loop below
    ladd = lookup.append            # aka "unsure"
    madd = modified.append
    aadd = added.append
    uadd = unknown.append
    iadd = ignored.append
    radd = removed.append
    dadd = deleted.append
    cadd = clean.append
    mexact = match.exact
    dirignore = self._dirignore
    checkexec = self._checkexec
    copymap = self._copymap
    lastnormaltime = self._lastnormaltime

    # We need to do full walks when either
    # - we're listing all clean files, or
    # - match.traversedir does something, because match.traversedir should
    #   be called for every dir in the working dir
    full = listclean or match.traversedir is not None
    for fn, st in self.walk(match, subrepos, listunknown, listignored,
                            full=full).iteritems():
        if fn not in dmap:
            # file on disk but not tracked: ignored or unknown
            if (listignored or mexact(fn)) and dirignore(fn):
                if listignored:
                    iadd(fn)
            else:
                uadd(fn)
            continue

        # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
        # written like that for performance reasons. dmap[fn] is not a
        # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
        # opcode has fast paths when the value to be unpacked is a tuple or
        # a list, but falls back to creating a full-fledged iterator in
        # general. That is much slower than simply accessing and storing the
        # tuple members one by one.
        t = dmap[fn]
        state = t[0]
        mode = t[1]
        size = t[2]
        time = t[3]

        if not st and state in "nma":
            dadd(fn)
        elif state == 'n':
            mtime = int(st.st_mtime)
            if (size >= 0 and
                ((size != st.st_size and size != st.st_size & _rangemask)
                 or ((mode ^ st.st_mode) & 0100 and checkexec))
                or size == -2 # other parent
                or fn in copymap):
                madd(fn)
            elif time != mtime and time != mtime & _rangemask:
                ladd(fn)
            elif mtime == lastnormaltime:
                # fn may have been changed in the same timeslot without
                # changing its size. This can happen if we quickly do
                # multiple commits in a single transaction.
                # Force lookup, so we don't miss such a racy file change.
                ladd(fn)
            elif listclean:
                cadd(fn)
        elif state == 'm':
            madd(fn)
        elif state == 'a':
            aadd(fn)
        elif state == 'r':
            radd(fn)

    return (lookup, scmutil.status(modified, added, removed, deleted,
                                   unknown, ignored, clean))
def matches(self, match):
    '''
    return files in the dirstate (in whatever state) filtered by match
    '''
    dmap = self._map
    if match.always():
        return dmap.keys()
    files = match.files()
    if match.matchfn == match.exact:
        # fast path -- filter the other way around, since typically files is
        # much smaller than dmap
        return [f for f in files if f in dmap]
    # util.all is a Python 2.4 compatible 'all'
    if not match.anypats() and util.all(fn in dmap for fn in files):
        # fast path -- all the values are known to be files, so just return
        # that
        return list(files)
    return [f for f in dmap if match(f)]
|
[
"kailu.luke@gmail.com"
] |
kailu.luke@gmail.com
|
309a53e30762913d596f126d9171294a625257d7
|
0b106c3c614e6f8fc7df4d8349ef80de38ae9d43
|
/binaryTreeReverseLevelTraversal.py
|
a41b43a9ad978eec10eab883291b3f0266b5d029
|
[] |
no_license
|
shengng325/LeetCode.py
|
b28142421544ea6f04a84785429e79da46d4a1d6
|
ab8f72fbec259773f1b2ddadadec52f3132908ab
|
refs/heads/master
| 2023-02-21T08:11:56.930960
| 2021-01-17T04:00:40
| 2021-01-17T04:00:40
| 317,896,294
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,084
|
py
|
def binaryTreeReverseLevelTraversal(root):
    """BFS the tree and collect node values one list per level.

    Returns a list of per-level value lists, top level first; [] for an
    empty tree.

    NOTE(review): despite the name, levels are NOT returned bottom-up;
    a true "reverse level traversal" would be ``values[::-1]`` -- confirm
    intent before changing.

    Fix: guard against root=None; the original crashed with
    AttributeError when called on an empty tree.
    """
    if root is None:
        return []
    queue = [root]          # flat BFS queue; `i` is the read cursor
    values = []
    i = 0
    while i < len(queue):
        level_values = []
        level_size = len(queue) - i   # everything not yet read is one level
        for _ in range(level_size):
            node = queue[i]
            level_values.append(node.value)
            if node.left is not None:
                queue.append(node.left)
            if node.right is not None:
                queue.append(node.right)
            i += 1
        # level_size >= 1 inside the loop, so the level is never empty
        values.append(level_values)
    return values
class BinaryTree:
    """Minimal binary-tree node: a value plus optional left/right children."""

    def __init__(self, value):
        self.value = value
        # children start empty and are wired up by the caller
        self.left = self.right = None
# Build the sample tree:
#           1
#         /   \
#        2     3
#       / \   / \
#      4   5 6   7
#     /
#    8
node1, node2, node3, node4, node5, node6, node7, node8 = (
    BinaryTree(v) for v in range(1, 9)
)
root = node1
root.left, root.right = node2, node3
node2.left, node2.right = node4, node5
node3.left, node3.right = node6, node7
node4.left = node8

results = binaryTreeReverseLevelTraversal(root)
print(results)
|
[
"shengng325@gmail.com"
] |
shengng325@gmail.com
|
7f5840c3f48b0f8c0c441ea843770c08e781b974
|
acc9bade6e8041fe2c79c9120b732e46486cbc68
|
/source/conf.py
|
fc8a3ae4ec8b7117fa1c579ed0fa8f4d4ed75046
|
[] |
no_license
|
921138129/tensorflow-handbook
|
592e9983a79bea0da47ebf46f46cfa0026318942
|
2ea7ffcacfdd8d2e066d02fb44dcc4455ed7ed4c
|
refs/heads/master
| 2023-01-01T14:13:27.633627
| 2020-10-12T11:35:49
| 2020-10-12T11:35:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,732
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# 简单粗暴TensorFlow documentation build configuration file, created by
# sphinx-quickstart on Sat Jan 20 00:48:15 2018.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# imgmath or mathjax
extensions = [ 'sphinx.ext.imgmath', 'sphinx.ext.intersphinx']
# LaTeX preamble for rendered math images: braket macros plus ctex so
# Chinese text inside formulas renders correctly.
imgmath_latex_preamble = '\\usepackage{braket}\n\\usepackage[UTF8]{ctex}'

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = '简单粗暴 TensorFlow 2'
copyright = '2018-2020, Xihan Li (snowkylin)'
author = 'Xihan Li (snowkylin)'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.4'
# The full version, including alpha/beta/rc tags.
release = '0.4 beta'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'zh_CN'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = [
    'build/*'
]

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'default'

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False


# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
    # Google Analytics property injected by the RTD theme
    'analytics_id': 'UA-40509304-12'
}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
# html_sidebars = {
# '**': [
# 'about.html',
# 'navigation.html',
# 'relations.html', # needs 'show_related': True theme option to display
# 'searchbox.html',
# 'donate.html',
# ]
# }
def setup(app):
app.add_stylesheet( "css/custom.css" )
app.add_javascript( "js/tw_cn.js" )
app.add_javascript( "js/pangu.min.js" )
app.add_javascript( "js/custom_20200921.js" )
# -- Options for HTMLHelp output ------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = 'TensorFlowdoc'


# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    # ctex enables Chinese typesetting; braket adds Dirac-notation macros.
    'preamble': '\\usepackage{ctex}\n\\usepackage{braket}',

    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',

    # Remove blank pages
    'classoptions': ',openany,oneside'
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    ('index_latex_cn', 'tensorflow_handbook.tex', '简明的 TensorFlow 2',
     author, 'manual'),
]


# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'tensorflow', '简单粗暴 TensorFlow 2',
     [author], 1)
]


# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'TensorFlow', '简单粗暴 TensorFlow 2',
     author, 'TensorFlow', '简单粗暴 TensorFlow 2',
     'Miscellaneous'),
]
|
[
"xihanli316@gmail.com"
] |
xihanli316@gmail.com
|
ad5f9ab8ed91f75acb5f58158279cfaa8dbf1bbe
|
e8cb6b396c6717332fe457f7cfbc3a7727138ba7
|
/recruiting_system/settings.py
|
da35ad3802c8e53d2404b623c5b7fcecff7f2936
|
[] |
no_license
|
aleksey-tsyganov/django_recruit_app
|
633ba725cf322af586102dc1627ffa4c802898cb
|
974824a13aef1eef9dc1e8820c2c814f500f0a60
|
refs/heads/master
| 2021-09-27T00:14:19.533458
| 2020-02-28T14:51:52
| 2020-02-28T14:51:52
| 228,168,832
| 0
| 0
| null | 2021-09-22T18:08:30
| 2019-12-15T10:51:52
|
Python
|
UTF-8
|
Python
| false
| false
| 3,760
|
py
|
"""
Django settings for recruiting_system project.
Generated by 'django-admin startproject' using Django 2.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import django_heroku
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Project-wide template and templatetag directories (outside the apps).
TEMPLATES_DIR = os.path.join(BASE_DIR, 'templates')
TEMPLATETAGS_DIR = os.path.join(BASE_DIR, 'templatetags')

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# SECRET_KEY = 't18xe$@e@3so&k5(kz&4ku@ju6zo$mcg$$umbb-3_j==h2g=f4'
# NOTE(review): SECRET_KEY is None if DJANGO_SK is unset, which makes
# Django refuse to start -- confirm the env var is always provided.
SECRET_KEY = os.environ.get("DJANGO_SK")

# SECURITY WARNING: don't run with debug turned on in production!
# DEBUG = True
# DEBUG is on only when the env var is literally the string "True".
DEBUG = (os.environ.get("DEBUG_VALUE") == "True")

ALLOWED_HOSTS = ["recruits-app.herokuapp.com"]
# Application definition
# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'recruits',        # project app
    'crispy_forms',    # django-crispy-forms (see CRISPY_TEMPLATE_PACK)
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'recruiting_system.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # search the project-level template dirs in addition to app dirs
        'DIRS': [TEMPLATES_DIR, TEMPLATETAGS_DIR],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'recruiting_system.wsgi.application'


# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# NOTE(review): SQLite here; django_heroku.settings() at the bottom of this
# module overrides this with DATABASE_URL on Heroku.

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/

STATIC_URL = '/static/'

# Extra locations searched by collectstatic/runserver for static assets.
# Fix: Django reads STATICFILES_DIRS (plural); the original defined
# STATICFILES_DIR, which Django silently ignores.
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, 'static'),
]
# Backward-compatible alias in case any project code imported the old name.
STATICFILES_DIR = STATICFILES_DIRS

# Outgoing mail via Yandex SMTP over SSL; credentials come from the env.
# NOTE(review): EMAIL_HOST_USER reads the env var named 'EMAIL_HOST' --
# confirm that is the intended variable name.
EMAIL_HOST = 'smtp.yandex.ru'
EMAIL_PORT = 465
EMAIL_HOST_USER = os.environ.get('EMAIL_HOST')
EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_PASS')
EMAIL_USE_TLS = False
EMAIL_USE_SSL = True

# django-crispy-forms rendering pack.
CRISPY_TEMPLATE_PACK = 'bootstrap4'

# Let django-heroku patch settings (database URL, static files, logging).
django_heroku.settings(locals())
|
[
"aleksey_news@mail.ru"
] |
aleksey_news@mail.ru
|
1df5231363d599b7db0d0b58fb18abeed707e5b4
|
56cf4eb06d069e0315fbe89ce825060f109308f1
|
/HerokuBlog/create_config.py
|
eb53bc789b3971a75a1611446fa37c312e3c9aaf
|
[
"MIT",
"WTFPL"
] |
permissive
|
Lasher09/NLP
|
2e9757d2174de2056502517f3ce5eb1f35072813
|
78eadcd5c65d43c51a9aee6ee6b8651a74db7a98
|
refs/heads/master
| 2021-01-23T03:22:05.375901
| 2014-05-02T23:11:25
| 2014-05-02T23:11:25
| 18,149,264
| 2
| 0
| null | 2014-05-02T23:11:26
| 2014-03-26T18:36:19
|
CSS
|
UTF-8
|
Python
| false
| false
| 4,027
|
py
|
""" utility for generating a configuration file for a simple blog """
from werkzeug.security import generate_password_hash
from os import urandom
from base64 import b32encode
import sys
import getpass
# Command-line handling (Python 2 script, hence print statements).
if "--help" in sys.argv:
    print "create_config.py:"
    print "Options:"
    print " * --fresh"
    print " Over-write existing config if it exists"
    print " * --update"
    print " Update existing config (Default if the config exists)"
    print " * --changepass"
    print " Change the admin password"
    sys.exit(1)

try:
    import settings

    # an existing, importable settings.py defaults us to update mode
    if not "--fresh" in sys.argv and not "--changepass" in sys.argv:
        sys.argv.append("--update")
        print "Updating. Use --fresh to over-write existing config"
except (ImportError, SyntaxError):
    # no (or broken) settings.py: fall through to fresh generation
    settings = None
def input_with_default(*args, **kwargs):
    # Wrapper around _input_with_default that optionally coerces the
    # response with _type, re-prompting until the value parses.
    _type = kwargs.pop("_type", None)
    name, res = _input_with_default(*args, **kwargs)
    if _type is None:
        return name, res
    else:
        try:
            return name, _type(res)
        except ValueError:
            print "Error: Value %s is not the correct type. Please re-enter" % res
            # recurse to ask again with the same prompt/default
            return input_with_default(*args, _type=_type, **kwargs)
def _input_with_default(name, prompt, default, func=lambda v: v, _input_func=raw_input):
    """ Small wrapper around raw_input for prompting and defaulting """
    if ("--update" in sys.argv or ("--changepass" in sys.argv and name != "ADMIN_PASSWORD")) and settings is not None:
        # We are updating. If the name already exists in the settings object
        # then we ignore it and return the existing value
        try:
            return name, getattr(settings, name)
        except AttributeError:
            # Continue on and prompt the user
            pass
    response = _input_func("%s (Default %s) " % (prompt, default or "None"))
    if not response:
        # empty answer: accept the default (run through func as well)
        return name, func(default)
    return name, func(response)
# Most likely a more elegant way to do this. Oh well.
def input_password(*args, **kwargs):
# This should make input_with_default use getpass.getpass instead of raw_input, however
# in PyCharm this causes issues. Stick with raw-input for now.
name, response = input_with_default(*args, _input_func=getpass.getpass, **kwargs)
return name, generate_password_hash(response)
print "%s a Simple config file. Please answer some questions:" % ("Updating" if "--update" in sys.argv else "Generating")
SETTINGS = (
input_with_default("POSTS_PER_PAGE", "Posts per page", 5, _type=int),
input_with_default("POST_CONTENT_ON_HOMEPAGE", "Show the post content on the homepage",
"y", lambda v: v.lower()[0] == "y"),
input_with_default("ADMIN_USERNAME", "Admin username", "admin"),
input_password("ADMIN_PASSWORD", "Admin password", "password"),
input_with_default("ANALYTICS_ID", "Google analytics ID", ""),
input_with_default("GITHUB_USERNAME", "Github Username", ""),
input_with_default("CONTACT_EMAIL", "Contact Email", ""),
input_with_default("BLOG_TITLE", "Blog title", ""),
input_with_default("BLOG_TAGLINE", "Blog tagline", ""),
input_with_default("BLOG_URL", "Blog URL (e.g. /blog)", "/"),
input_with_default("FONT_NAME", "Font Name (Selected from google font library): ", "Source Sans Pro",
lambda v: v.replace(" ", "+")),
input_with_default("SECRET_KEY", "Secret key", b32encode(urandom(32))),
input_with_default("DISQUS_SHORTNAME", "Disqus Shortname", ""),
input_with_default("USE_ADDTOANY", "Enable AddToAny integration", "y", lambda v: v.lower()[0] == "y"),
input_with_default("USE_SUBTOME", "Enable SubToMe integration", "n", lambda v: v.lower()[0] == "y"),
)
with open("settings.py", "w") as fd:
fd.write("# -*- coding: utf-8 -*-\n")
for name, value in SETTINGS:
if isinstance(value, basestring):
value = "'%s'" % value.replace("'", "\\'")
fd.write("%s = %s\n" % (name, value))
fd.flush()
print "Created!"
|
[
"jacobhill.mail@gmail.com"
] |
jacobhill.mail@gmail.com
|
b6ea7ffc2b3e4e46027e80fd3fd7deccfbd362ea
|
5c9d926f2d0d28f211e29ae6901791864e1c6f15
|
/04.4_foods.py
|
805502920ce7bd03fc805791e2baf62471c35e09
|
[] |
no_license
|
hedongjie0917/Python_3
|
6fa48ff0fa730c8900133c4ac49503f741f55554
|
4251e488c2ee123d951bfe93e75c442b2ffdcc3f
|
refs/heads/master
| 2020-03-09T06:55:41.348736
| 2018-04-09T14:39:41
| 2018-04-09T14:39:41
| 128,604,744
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 608
|
py
|
#使用[:]来复制列表
my_foods=['pizza','falafel','carrot','cake','tomato']
friend_foods=my_foods[:]
print('p1')
print(my_foods)
print('\np2')
print(friend_foods)
my_foods.append('cannoli')
friend_foods.append('ice cream')
print('\np3')
print('my favorite foods are:')
print(my_foods)
print('\np4')
print('my firend foods are:')
print(friend_foods)
print('\np5')
print('The first three items in my_foods are:')
print(my_foods[:3])
print('\np6')
print('Three items from the middle of the list are:')
print(my_foods[1:4])
print('\np7')
print('The last three items in the list are:')
print(my_foods[-3:])
|
[
"hedongjie0917@users.github.com"
] |
hedongjie0917@users.github.com
|
ed16e9ec3b200016e9c915db3bb00f5e85bbd1ca
|
09b8f731cf118d4e241e9d8cd2b7ae8ba6149c6c
|
/myvenv/bin/pip
|
0611f8990f1b17a83d4bb9a462510001a9dd515a
|
[] |
no_license
|
prianthon/djangogirls
|
11e152fc69910490fda6c11b30afb879a5ebda1d
|
d53043dd0654760cedf3150b5be25e05aaa3a716
|
refs/heads/master
| 2021-01-13T15:56:47.699691
| 2016-12-19T16:28:32
| 2016-12-19T16:28:32
| 76,790,519
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 252
|
#!/Users/prianthon/BakPasirPython/djangogirls/myvenv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"prianthonsubardio@gmail.com"
] |
prianthonsubardio@gmail.com
|
|
e68e418a9b3c3a3921cc91ba24214d4e84f94f07
|
558ed97a6529eac4148f802bc6b96003d5f0d187
|
/store/views.py
|
0f7b5185e16c44e2eb3a4f0143d9ea3c4cf0f06d
|
[] |
no_license
|
carlosCeron/ecommerce
|
f067769521ae964f5bb9ad2708e273a66af68bfa
|
786b239f0693d2d4e22973a087a39fd5d90a3a2c
|
refs/heads/master
| 2023-04-20T15:18:33.417700
| 2021-04-29T05:12:17
| 2021-04-29T05:12:17
| 356,928,398
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 679
|
py
|
from django.shortcuts import render, get_object_or_404
from .models import Category, Product
def categories(request):
categories_list = Category.objects.all()
return {'categories': categories_list}
def all_products(request):
products = Product.objects.all()
return render(request, 'store/home.html', {'products': products})
def product_detail(request, slug):
product = get_object_or_404(Product, slug=slug, in_stock=True)
return render(request, 'store/products/detail.html', {'product': product})
def categories_list(request, category_slug):
list_category = Category.objects.all()
return render(request, 'store/category/list.html', {'categories': list_category})
|
[
"carlos.ceron.1986@gmail.com"
] |
carlos.ceron.1986@gmail.com
|
410a9f4fdbc38abdff9e31907116f90799c79832
|
531c47c15b97cbcb263ec86821d7f258c81c0aaf
|
/sdk/network/azure-mgmt-network/azure/mgmt/network/v2017_06_01/aio/operations_async/_load_balancers_operations_async.py
|
04d987ad2e60383b55b248cf3af0ab31c98d809b
|
[
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later",
"MIT"
] |
permissive
|
YijunXieMS/azure-sdk-for-python
|
be364d3b88204fd3c7d223df23756386ff7a3361
|
f779de8e53dbec033f98f976284e6d9491fd60b3
|
refs/heads/master
| 2021-07-15T18:06:28.748507
| 2020-09-04T15:48:52
| 2020-09-04T15:48:52
| 205,457,088
| 1
| 2
|
MIT
| 2020-06-16T16:38:15
| 2019-08-30T21:08:55
|
Python
|
UTF-8
|
Python
| false
| false
| 21,632
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class LoadBalancersOperations:
"""LoadBalancersOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2017_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
load_balancer_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-06-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
# Construct and send request
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
load_balancer_name: str,
**kwargs
) -> None:
"""Deletes the specified load balancer.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: None, or the result of cls(response)
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
load_balancer_name=load_balancer_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}'} # type: ignore
async def get(
self,
resource_group_name: str,
load_balancer_name: str,
expand: Optional[str] = None,
**kwargs
) -> "models.LoadBalancer":
"""Gets the specified load balancer.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: LoadBalancer, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2017_06_01.models.LoadBalancer
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.LoadBalancer"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-06-01"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('LoadBalancer', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
load_balancer_name: str,
parameters: "models.LoadBalancer",
**kwargs
) -> "models.LoadBalancer":
cls = kwargs.pop('cls', None) # type: ClsType["models.LoadBalancer"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-06-01"
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = 'application/json'
# Construct and send request
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'LoadBalancer')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('LoadBalancer', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('LoadBalancer', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
load_balancer_name: str,
parameters: "models.LoadBalancer",
**kwargs
) -> "models.LoadBalancer":
"""Creates or updates a load balancer.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param parameters: Parameters supplied to the create or update load balancer operation.
:type parameters: ~azure.mgmt.network.v2017_06_01.models.LoadBalancer
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: LoadBalancer, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2017_06_01.models.LoadBalancer
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.LoadBalancer"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
load_balancer_name=load_balancer_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('LoadBalancer', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}'} # type: ignore
def list_all(
self,
**kwargs
) -> AsyncIterable["models.LoadBalancerListResult"]:
"""Gets all the load balancers in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either LoadBalancerListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2017_06_01.models.LoadBalancerListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.LoadBalancerListResult"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-06-01"
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('LoadBalancerListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/loadBalancers'} # type: ignore
def list(
self,
resource_group_name: str,
**kwargs
) -> AsyncIterable["models.LoadBalancerListResult"]:
"""Gets all the load balancers in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either LoadBalancerListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2017_06_01.models.LoadBalancerListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.LoadBalancerListResult"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-06-01"
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('LoadBalancerListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers'} # type: ignore
|
[
"noreply@github.com"
] |
YijunXieMS.noreply@github.com
|
7b21bfe2eaeaf827ca3442ea457cf38d94404b66
|
f3e99347c2048312751370caeb85d8a5c31fd1d0
|
/Assignment 3/Python/FakerGeneration.py
|
795df619f5796494aabf3e6269e0f82d4da176e8
|
[] |
no_license
|
C-Mitch/Database-Management
|
ce18f17d30ce13517b57e64201dd2771d4b37ef2
|
fc8c7ab0ea3c9b1425d482341e3c9e92cf24631e
|
refs/heads/master
| 2020-04-25T00:55:46.403713
| 2019-04-17T23:30:23
| 2019-04-17T23:30:23
| 172,393,285
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,347
|
py
|
#Chase Mitchell
#002274657
#Mitch213@mail.chapman.edu
#CPSC-408-01
#Assignment3
import sys
import csv
from faker import Faker
if len(sys.argv) == 3:
if sys.argv[2].isdigit():
rows = int(sys.argv[2])
with open(sys.argv[1], 'w', newline='') as file:
faker = Faker()
columns = ['FirstName', 'LastName', 'DateOfBirth', 'SSN', 'Address',
'Email', 'Country', 'Job', 'JobAddress', 'UserName', 'Password', 'PID']
csvWriter = csv.DictWriter(file, fieldnames=columns)
csvWriter.writeheader()
while rows > 0:
csvWriter.writerow({
'FirstName': faker.first_name(),
'LastName': faker.last_name(),
'DateOfBirth': faker.date(pattern="%Y-%m-%d", end_datetime="-10y"),
'SSN': faker.itin(),
'Address': faker.street_address(),
'Email': faker.email(),
'Country': faker.country(),
'Job': faker.job().replace(",", ''),
'JobAddress': faker.street_address(),
'UserName': faker.user_name(),
'Password': faker.password(),
'PID': faker.ean8()
})
rows -= 1
print("File Generated Successfully")
else:
print("Please Pass An Integer Value As The Second Argument At Execution")
else:
print("Please Input A File Name And Number Of Rows To Be Generated At Execution")
|
[
"C-Mitch@users.noreply.github.com"
] |
C-Mitch@users.noreply.github.com
|
02b751ed22fd231064bb7d2b92c0d7ec080c721e
|
76cc692b34c11db1ae7e8d1c6879453c1e689eaf
|
/up2/pesos.py
|
ca07f4956bc3a09d092b288420f8a2d3fc6e912d
|
[] |
no_license
|
btabuenca/sbc-1
|
440f0c2a9265db4e39cbb71f53bda661ddf24d28
|
5296b48b86d0477ad35420b29456da99fdcd3449
|
refs/heads/master
| 2021-02-15T00:27:03.128125
| 2020-01-30T17:27:48
| 2020-01-30T17:27:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 958
|
py
|
#!/usr/bin/python3
import mraa
import time
import spidev
from time import sleep
import os
spi = spidev.SpiDev()
spi.open(1,0)
luz = 0
espera = 1
# Use pin 7 by default
pin_r = 22
pin_g = 27
pin_b = 23
# Export the GPIO pin for use
pin_r = mraa.Gpio(pin_r,owner=False,raw=True)
pin_g = mraa.Gpio(pin_g,owner=False,raw=True)
pin_b = mraa.Gpio(pin_b,owner=False,raw=True)
pin_r.dir(mraa.DIR_OUT)
pin_g.dir(mraa.DIR_OUT)
pin_b.dir(mraa.DIR_OUT)
# Small delay to allow udev rules to execute (necessary only on up)
time.sleep(1)
# Configure the pin direction
pin_r.dir(mraa.DIR_OUT)
pin_g.dir(mraa.DIR_OUT)
pin_b.dir(mraa.DIR_OUT)
def leer(canal):
rawData = spi.xfer([1,(8+canal) << 4,0])
processedData = ((rawData[1]&3) << 8)+ rawData[2]
return processedData
# Loop
while True:
datos= leer(luz)
print (datos)
if datos > 700:
pin_r.write(1)
print ("Encender led")
else:
pin_r.write(0)
sleep(espera)
|
[
"noreply@github.com"
] |
btabuenca.noreply@github.com
|
5a2c1271ed984e6a51ea27f0c50420b891db695f
|
a7ae573e0403a0eb24bab2a787dd16ff9aa6025a
|
/Data_Analyst/pandas/pandas_test.py
|
0c2b262ae925d2022d204016a07aafe4479379e3
|
[] |
no_license
|
isyefeng/python-test
|
c2538c91ad28e442f9946d999a684e8d14b5368b
|
0b883cc95359562d00c4710e624a57fbc9a23fb3
|
refs/heads/master
| 2021-06-23T22:14:36.345281
| 2019-12-20T10:41:22
| 2019-12-20T10:41:22
| 192,188,404
| 1
| 0
| null | 2021-06-10T22:16:41
| 2019-06-16T12:29:02
|
Python
|
UTF-8
|
Python
| false
| false
| 702
|
py
|
import pandas as pd
'''Series'''
s1 = pd.Series([1,2,3,4,5])
print(s1)
print(s1.index) #查看索引
print(s1.values) #查看值
#更改索引index
s2 = pd.Series(['a','b','c','d','e'],index=[1,2,3,4,5])
print(s2)
#传入字典
s3 = pd.Series({'a':1,'b':2,'c':3})
print(s3)
'''DataFrame'''
s4 = pd.DataFrame([['a','b'],['c','d'],['e','f']])
print(s4)
s5 = pd.DataFrame([['a','b'],['c','d'],['e','f']],
columns = ['frist','last'], #设置行索引
index = [1,2,3]) #设置列索引
print(s5)
'''传入字典方式创建,key相当于列索引'''
s6 = pd.DataFrame({'frist':[2,3,4],'last':[6,7,8]})
print(s6)
print(s6.index)
print(s6.columns)
|
[
"530030302@qq.com"
] |
530030302@qq.com
|
f37760154f489059ee55d89018ef5d3c89eb8bbe
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03031/s605906713.py
|
7276915a941904b8f81053a9d5b6b9e4110970f7
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 420
|
py
|
n,m=map(int,input().split())
K=[]
S=[]
for i in range(m):
tmp=list(map(int,input().split()))
K.append(tmp[0])
S.append(tmp[1:])
P=list(map(int,input().split()))
ans=0
for i in range(2**n):
bit=format(i,'0'+str(n)+'b')
bit=list(map(int,bit))
on_den=0
for den in range(m):
on_swi=0
for s in S[den]:
on_swi+=bit[s-1]
if on_swi%2==P[den]:
on_den+=1
if on_den==m:
ans+=1
print(ans)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
c8b6b5fb00eff87d9278287b58bc5e99a12029f3
|
b7be705ab774860f8a5a49d756f50994a37e307d
|
/access-control/python/iot_access_control/hardware/grove.py
|
64ab121a01fba3241ba216bba5cc2e068af1901a
|
[
"MIT"
] |
permissive
|
intel-iot-devkit/how-to-code-samples
|
b87916f7e919d859a5f4084f40e8bd62007a8469
|
821de0727b999391131e6947868371b1424c1d39
|
refs/heads/master
| 2023-01-05T04:30:54.650902
| 2023-01-03T22:56:31
| 2023-01-03T22:56:31
| 45,066,968
| 204
| 161
| null | 2017-11-13T20:57:42
| 2015-10-27T20:08:02
|
C++
|
UTF-8
|
Python
| false
| false
| 3,155
|
py
|
# Copyright (c) 2015 - 2016 Intel Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import print_function
from upm.pyupm_jhd1313m1 import Jhd1313m1
from upm.pyupm_biss0001 import BISS0001
from mraa import addSubplatform, GENERIC_FIRMATA
from ..config import HARDWARE_CONFIG, KNOWN_PLATFORMS
from .board import Board, PinMappings
from .events import MOTION_DETECTED
class GroveBoard(Board):
    """Board implementation for the Grove hardware kit.

    Wires up a BISS0001 PIR motion sensor and a JHD1313M1 RGB LCD,
    remapping pin numbers when the platform runs through Firmata.
    """

    def __init__(self):
        super(GroveBoard, self).__init__()
        # Default pin layout for a directly connected Grove shield.
        self.pin_mappings = PinMappings(
            motion_pin=4,
            i2c_bus=6
        )
        # Under Firmata the board is a sub-platform: every pin number
        # (including the I2C bus) is offset by 512.
        if HARDWARE_CONFIG.platform == KNOWN_PLATFORMS.firmata:
            addSubplatform(GENERIC_FIRMATA, "/dev/ttyACM0")
            self.pin_mappings += 512
            self.pin_mappings.i2c_bus = 512
        self.motion = BISS0001(self.pin_mappings.motion_pin)
        self.screen = Jhd1313m1(self.pin_mappings.i2c_bus, 0x3E, 0x62)
        self.last_motion = False

    def update_hardware_state(self):
        """Poll the PIR sensor and fire MOTION_DETECTED on a state change to motion."""
        motion_now = self.detect_motion()
        if motion_now == self.last_motion:
            return
        if motion_now:
            self.trigger_hardware_event(MOTION_DETECTED)
        self.last_motion = motion_now

    # hardware functions
    def detect_motion(self):
        """Return True when the PIR motion sensor currently reports motion."""
        return self.motion.motionDetected()

    def write_message(self, message, line=0):
        """Write *message* on the given LCD row, left-padded to the 16-column width."""
        self.screen.setCursor(line, 0)
        self.screen.write(message.ljust(16))

    def change_background(self, color):
        """Set the LCD backlight to a named color; unknown names fall back to white."""
        palette = {
            "red": (255, 0, 0),
            "blue": (0, 0, 255),
            "white": (255, 255, 255)
        }
        red, green, blue = palette.get(color, palette["white"])
        self.screen.setColor(red, green, blue)
|
[
"erik@snowcrabsoftware.com"
] |
erik@snowcrabsoftware.com
|
0bc7926825cc6eb9a6d7d563f069e0b7bb858835
|
68ca39ec51839c1be20bf3a05178a087f7294afc
|
/working-examples/udemy/fanout-consumer.py
|
d37ef58f27e7d94dd79d372d54bf6e042e280a7c
|
[] |
no_license
|
CariZa/rabbitmq-test
|
49db02420fb0fe336657054a1166d43a85c042b8
|
5bd274a193bfa7681a66b6aa2f536ea8f19b5108
|
refs/heads/master
| 2020-03-30T01:24:13.021028
| 2018-10-02T10:36:46
| 2018-10-02T10:36:46
| 150,574,142
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,079
|
py
|
#!/usr/bin/env python
import pika
import time
class consume_engine:
    """Fanout consumer: binds an exclusive, auto-named queue to the
    ``score.feed.exchange`` fanout exchange on a local RabbitMQ broker
    and prints every received feed message.
    """
    def __init__(self):
        # NOTE(review): _messages and _message_interval are set but never
        # read in this class -- presumably mirrored from a producer script.
        self._messages = 10
        self._message_interval = 5
        self._queue_name = None    # filled in by declare_queue()
        self._connection = None    # filled in by connection()
        self._channel = None       # filled in by channel()
        self._exchange = "score.feed.exchange"
    def connection(self):
        """Open and return a blocking connection to the local broker."""
        credentials = pika.PlainCredentials('guest', 'guest')
        # 54.200.201.1
        parameters = pika.ConnectionParameters('localhost', 5672, '/', credentials, socket_timeout=300)
        self._connection = pika.BlockingConnection(parameters)
        print("Connected Successfully !!!")
        return self._connection
    def channel(self):
        """Open a channel on the previously opened connection."""
        self._channel = self._connection.channel()
        print("Channel opened...")
    def declare_exchange(self):
        """Declare the fanout exchange (idempotent on the broker side)."""
        self._channel.exchange_declare(exchange=self._exchange,
                                       exchange_type='fanout')
        print("Exchange declared....")
    def declare_queue(self):
        """Declare an exclusive queue and remember its broker-generated name."""
        result = self._channel.queue_declare(exclusive=True)
        self._queue_name = result.method.queue
        print("Queue declared....")
        print(' [*] Waiting for messages. To exit press CTRL+C')
    def make_binding(self):
        """Bind the exclusive queue to the fanout exchange."""
        self._channel.queue_bind(exchange=self._exchange,
                                 queue=self._queue_name)
        print("Made binding between exchange: %s and queue: %s" %(self._exchange, self._queue_name))
    def on_message(self, channel, method, properties, body):
        """Delivery callback: print the feed body, then pause briefly."""
        print(" [x] Feed Received - %s \n" % str(body))
        time.sleep(2)
    def consume_messages(self):
        """Start the blocking consume loop (no_ack: messages are not acknowledged)."""
        self._channel.basic_consume(self.on_message,
                                    queue=self._queue_name, no_ack=True)
        self._channel.start_consuming()
    def run(self):
        """Run the full setup sequence, then block consuming messages."""
        self.connection()
        self.channel()
        self.declare_exchange()
        self.declare_queue()
        self.make_binding()
        self.consume_messages()
if __name__ == '__main__':
engine = consume_engine()
engine.run()
|
[
"liebenbc@yahoo.com"
] |
liebenbc@yahoo.com
|
4142a01562511f31b61f45cf79e8bee27a8961d3
|
93b4403ef6ce0d8067694d69fbdcfbb4c8107013
|
/if_else_2.py
|
94fe4058f6f94b5b6c53c4f6045849990098ed18
|
[] |
no_license
|
anzunming/code
|
6817efe6f3f235356c7a8492ab80e17dfed2a8fe
|
0b657cf264d9639c86c18331ed4219fe6dd0d16d
|
refs/heads/master
| 2021-10-22T22:39:35.686920
| 2019-03-13T06:35:11
| 2019-03-13T06:35:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 189
|
py
|
#!/usr/bin/env python
#coding:utf-8
# Python 2 examples of compound boolean conditions (and / or).
num1 = 50
# Inclusive range check: true when 0 <= num1 <= 100.
if num1 >= 0 and num1 <= 100:
    print u"num1 在0~100之间"
num2 = 35
# Out-of-range check: true when num2 falls outside [0, 100].
if num2 < 0 or num2 > 100:
    print u"num2 大于100 或者 num2 小于0"
|
[
"2190881697@qq.com"
] |
2190881697@qq.com
|
e770e7733fb63098ada0675deb5a4a5f88ddbcfa
|
7a6f8c19dbe42097b12175c685278a34459301b9
|
/python_codes/multiples_of_3_and_5.py
|
01ee4a0ac43e7abcca64f6c9d8538b8005b12347
|
[] |
no_license
|
shripadtheneo/project_euler
|
14d7f70832f7b716bdafd4c8a9309e1a21dcdf16
|
f8341f65945c2792069d82cf310141eb43a56efe
|
refs/heads/master
| 2021-01-10T03:50:17.409065
| 2016-02-25T10:31:07
| 2016-02-25T10:31:07
| 51,155,600
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 393
|
py
|
"""
If we list all the natural numbers below 10 that are multiples of 3 or 5, we get 3, 5, 6 and 9. The sum of these
multiples is 23.
Find the sum of all the multiples of 3 or 5 below 1000.
"""
def alternate(limit=1000):
    """Print the sum of all natural numbers below *limit* that are
    multiples of 3 or 5 (Project Euler problem 1).

    Args:
        limit (int): exclusive upper bound. Defaults to 1000, matching the
            original problem statement, so existing no-argument calls are
            unchanged.
    """
    # sum() over a generator replaces the original manual while-loop
    # accumulator; the divisibility condition is identical.
    total = sum(n for n in range(limit) if n % 3 == 0 or n % 5 == 0)
    print(total)
if __name__ == '__main__':
alternate()
|
[
"shripadtheneo@gmail.com"
] |
shripadtheneo@gmail.com
|
7607ffb566a61ce446317f1daf35d5b9c67d9cb7
|
5d5463c596f7f504be27edbb7d695d0a1d08972b
|
/NeoML/Python/neoml/CrossValidation.py
|
769f24b85b91cbd54951ab0a7f4f5f0e34f80346
|
[
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"NCSA",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-protobuf",
"LicenseRef-scancode-arm-llvm-sga",
"Intel",
"LLVM-exception",
"Apache-2.0"
] |
permissive
|
fernandocyder/neoml
|
adc48c476301012dbc2796c7c0d7f9f506fd14a3
|
881815bb105bd8893da931905fcd6e8581a63ada
|
refs/heads/master
| 2023-04-06T10:29:20.384463
| 2021-04-14T11:15:25
| 2021-04-14T11:15:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,800
|
py
|
""" Copyright (c) 2017-2021 ABBYY Production LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
--------------------------------------------------------------------------------------------------------------
"""
import numpy
from .Utils import convert_data, get_data
from scipy.sparse import csr_matrix, issparse
import neoml.PythonWrapper as PythonWrapper
def cross_validation_score(classifier, X, Y, weight=None, score="accuracy", parts=5, stratified=False):
    """Run k-fold cross-validation of *classifier* on the given sample.

    (The previous docstring described single-sample prediction and did not
    match this function.)

    Parameters
    ----------
    classifier : neoml classifier object
        Forwarded unchanged to the native backend.
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        The input vectors, put into a matrix. The values will be
        converted to ``dtype=np.float32``. If a sparse matrix is
        passed in, it will be converted to a sparse ``csr_matrix``.
    Y : array-like of shape (n_samples,)
        Non-negative integer class labels, one per input vector.
    weight : array-like of shape (n_samples,), default=None
        Sample weights. If None, then samples are equally weighted.
    score : {'accuracy', 'f1'}, default='accuracy'
        The metric evaluated on each fold.
    parts : int, default=5
        Number of folds; must satisfy 0 < parts < n_samples / 2.
    stratified : bool, default=False
        If True, each part keeps (almost) the same class ratio as the
        input data.

    Return values
    -------
    Result of the native ``PythonWrapper._cross_validation_score`` call
    (presumably per-fold scores -- see the NeoML backend documentation).
    """
    x = convert_data( X )
    # NOTE(review): numpy.array(..., copy=False) is rejected by NumPy >= 2.0
    # when a copy is required; consider numpy.asarray if upgrading.
    y = numpy.array( Y, dtype=numpy.int32, copy=False )
    if x.shape[0] != y.size:
        raise ValueError('The `X` and `Y` inputs must be the same length.')
    if weight is None:
        weight = numpy.ones(y.size, numpy.float32)
    else:
        weight = numpy.array( weight, dtype=numpy.float32, copy=False )
    if numpy.any(y < 0):
        raise ValueError('All `Y` elements must be >= 0.')
    if numpy.any(weight < 0):
        raise ValueError('All `weight` elements must be >= 0.')
    if score != "accuracy" and score != "f1":
        raise ValueError('The `score` must be one of: `accuracy`, `f1`.')
    if parts <= 0 or parts >= y.size / 2:
        raise ValueError('`parts` must be in (0, vectorCount).')
    # All validation passed: hand off to the C++ implementation.
    return PythonWrapper._cross_validation_score(classifier, *get_data(x), int(x.shape[1]), y, weight, score, parts, bool(stratified))
|
[
"noreply@github.com"
] |
fernandocyder.noreply@github.com
|
72603e37ac9301b0811965a1b3bb4fca2ecc8f18
|
d677565818d7a3813c4aecc0ffd1da8af97e9513
|
/2.Accessing-Text-Corpora-and-Lexcial-Resoutces/2.5-WordNet-synset.py
|
25961f7a80c270a385332a16fc5c13521dce3422
|
[] |
no_license
|
laixintao/NLP-with-python-demo-code
|
58f62a2a0abb80f948d09707989f9d351a794699
|
25d7987faf7c48da8ea29007c20b8bcb1c33da63
|
refs/heads/master
| 2021-01-23T05:39:27.506752
| 2015-07-21T04:03:17
| 2015-07-21T04:03:17
| 37,509,805
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 235
|
py
|
import nltk
from nltk.corpus import wordnet as wn
# Look up the WordNet synset for the first noun sense of "car".
motorcar = wn.synset('car.n.01')
# All more-specific synsets (hyponyms) of the motorcar concept.
types = motorcar.hyponyms()
print types[26]
# NOTE(review): unlike the book's listing, lemma names are obtained by
# calling lemma.name() -- presumably because name is a method in this
# NLTK version, not an attribute.
print sorted([lemma.name() for synset in types for lemma in synset.lemmas()])
|
[
"799303812@163.com"
] |
799303812@163.com
|
5f590c51a421a9d6091983afe3ce5055d3d916f2
|
f8a348053df9118331924c7c004714c6679098b7
|
/main/veh.py
|
f55ff5bdc87ff5e221b7f5ce6c4512ac329c4caf
|
[] |
no_license
|
duyunhe/transportDepartment
|
03ec21535a4dc023b8ae7c5ff49bb3eb974a629e
|
592f2364a53c60f696bdd6d4691e5a16ccfec993
|
refs/heads/master
| 2020-05-27T19:21:45.993701
| 2019-12-19T03:23:48
| 2019-12-19T03:23:48
| 188,760,129
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,328
|
py
|
# -*- coding: utf-8 -*-
# @Time : 2019/8/21 14:10
# @Author : yhdu@tongwoo.cn
# @简介 :
# @File : veh.py
import xlrd
import cx_Oracle
import os
os.environ['NLS_LANG'] = 'SIMPLIFIED CHINESE_CHINA.AL32UTF8'
def get_filtered_veh():
    """Return the set of plate numbers (vehi_no) whose ISU ids appear in
    ./data/filterAccOffISU.txt, resolved through the vw_vehicle view."""
    fp = open("./data/filterAccOffISU.txt")
    # The file holds one comma-separated line of ISU (terminal) ids.
    line = fp.readline()
    isu_list = line.strip('\n').split(',')
    fp.close()
    conn = cx_Oracle.connect("lklh", "lklh", "192.168.0.113/orcl")
    cur = conn.cursor()
    sql = "select mdt_no, vehi_no from vw_vehicle t"
    cur.execute(sql)
    # Map terminal id (mdt_no) -> plate number (vehi_no).
    veh_dict = {}
    for item in cur:
        mdt, veh = item[:]
        veh_dict[mdt] = veh
    set_400 = set()
    no_cnt = 0  # counts ISUs with no matching vehicle (not reported)
    for isu in isu_list:
        try:
            veh = veh_dict[isu]
            set_400.add(veh)
        except KeyError:
            no_cnt += 1
    cur.close()
    conn.close()
    return set_400
def get_veh1():
    """Return the first 4 plate numbers (column 0) of ./data/sup60_8.xlsx
    as UTF-8 byte strings (Python 2)."""
    veh_list = []
    xl = xlrd.open_workbook('./data/sup60_8.xlsx')
    sheet = xl.sheet_by_index(0)
    for i in range(4):
        val = sheet.cell(i, 0).value
        str_val = val.encode('utf-8')
        veh_list.append(str_val)
    print "no data", len(veh_list)
    return veh_list
def load_txt():
    """Load the comma-separated ISU id list from filterAccOffISU.txt."""
    fp = open("./data/filterAccOffISU.txt")
    line = fp.readline()
    isu = line.strip('\n').split(',')
    fp.close()
    print "load isu", len(isu)
    return isu
def load_filtered_veh():
    """Same as get_filtered_veh() but loads the ISU list via load_txt().

    NOTE(review): near-duplicate of get_filtered_veh(); unlike it, this
    version never closes the cursor or connection.
    """
    isu_list = load_txt()
    conn = cx_Oracle.connect("lklh", "lklh", "192.168.0.113/orcl")
    cur = conn.cursor()
    sql = "select mdt_no, vehi_no from vw_vehicle t"
    cur.execute(sql)
    veh_dict = {}
    for item in cur:
        mdt, veh = item[:]
        veh_dict[mdt] = veh
    set_400 = set()
    no_cnt = 0
    for isu in isu_list:
        try:
            veh = veh_dict[isu]
            set_400.add(veh)
        except KeyError:
            no_cnt += 1
    return set_400
def get_veh2_without_accoff_filter():
    """Plates from ./data/sup15.xlsx that are NOT in the ACC-off filtered set
    (set difference)."""
    veh_list = set()
    xl = xlrd.open_workbook('./data/sup15.xlsx')
    sheet = xl.sheet_by_index(0)
    n = sheet.nrows
    for i in range(n):
        val = sheet.cell(i, 0).value
        str_val = val.encode('utf-8')
        # Some cells carry a trailing space; strip just that character.
        if str_val[-1] == ' ':
            str_val = str_val[:-1]
        veh_list.add(str_val)
    no_filter_set = list(veh_list - get_filtered_veh())
    print "all no filter", len(no_filter_set), "veh"
    return no_filter_set
def get_veh2():
    """Return a single hard-coded test plate (debug shortcut)."""
    veh_list = ['浙A2X302']
    return veh_list
def get_veh2_with_accoff_filter():
    """Plates from ./data/sup15.xlsx that ARE in the ACC-off filtered set
    (set intersection).

    NOTE(review): the printed message is the same as the "without" variant.
    """
    veh_list = set()
    xl = xlrd.open_workbook('./data/sup15.xlsx')
    sheet = xl.sheet_by_index(0)
    n = sheet.nrows
    for i in range(n):
        val = sheet.cell(i, 0).value
        str_val = val.encode('utf-8')
        if str_val[-1] == ' ':
            str_val = str_val[:-1]
        veh_list.add(str_val)
    no_filter_set = list(veh_list & get_filtered_veh())
    print "all no filter", len(no_filter_set), "veh"
    return no_filter_set
def get_veh_city():
    """Return plates from column 1 of ./veh/sup_city.xlsx, skipping the header row."""
    veh_list = []
    xl = xlrd.open_workbook('./veh/sup_city.xlsx')
    sheet = xl.sheet_by_index(0)
    n = sheet.nrows
    for i in range(1, n):
        val = sheet.cell(i, 1).value
        str_val = val.encode('utf-8')
        veh_list.append(str_val)
    print "city data", len(veh_list)
    return veh_list
def get_veh_3_test():
    """Return a single hard-coded test plate (debug shortcut)."""
    veh_list = ['浙A0F964']
    return veh_list
|
[
"a_crux@126.com"
] |
a_crux@126.com
|
40ff09d2b118f7b5ca02371691a7edf650285340
|
3e0daa411f9c068286848b0794f0a7b42083d3ed
|
/tutorial/tutorial/spiders/QA_spider.py
|
6a6776edf277162c4d3f0cc5bf92f9a3d2b5bff3
|
[] |
no_license
|
eyelc/-
|
1dd98a370393c1e6e9539d90c45dc6cd0b5475f3
|
295ad63120f5709441446919abd73fc074b477b6
|
refs/heads/master
| 2020-04-13T15:20:53.650238
| 2018-12-27T11:50:14
| 2018-12-27T11:50:14
| 163,288,340
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,413
|
py
|
from scrapy.http import FormRequest
from tutorial.items import QA_spiderItem
import scrapy
import json
class nextSpiderSpider(scrapy.Spider):
    """Scrapy spider that POSTs to the eastmoney Q&A gateway and prints the
    question id and summary of every item on the first 9 pages of the Q&A list.
    """
    name = "QA_spider"
    allowed_domains = ["eastmoney.com"]
    start_urls = ['http://ask.eastmoney.com/wenda/QAInterfaceRead.aspx']
    def start_requests(self):
        """Build one POST request per page (pages 1-9) of the GetQAList API."""
        url = "http://ask.eastmoney.com/wenda/QAInterfaceRead.aspx"
        requests = []
        for i in range(1,10):
            # unicornHeader = {
            #     'Host': 'ask.eastmoney.com',
            #     'Origin': 'http://ask.eastmoney.com',
            #     'Referer': 'http://ask.eastmoney.com/detail.html?qid=197899100333674496',
            # }
            #
            # formdata = {'url':'QAApi/QA/GetQuestionDetail',
            #             'key':'{"QId":"197899100333674496","PageNo":'+ str(i) +',"PageSize":40,"OnlyBest":0,"AppId":"EM_PC_Web"}'
            # }
            unicornHeader = {
                'Host': 'ask.eastmoney.com',
                'Origin': 'http://ask.eastmoney.com',
                'Referer': 'http://ask.eastmoney.com',
            }
            # The gateway takes the target API path in 'url' and a JSON
            # string (with the page number spliced in) in 'key'.
            formdata = {'url':'QAApi/QA/GetQAList',
                        'key':'{"PageNo":'+ str(i) +',"PageSize":20,"SortType":0,"AppId":"EM_PC_Web"}'
            }
            request = FormRequest(
                url,
                headers = unicornHeader,
                callback=self.parse,
                method = 'POST',
                formdata=formdata)
            requests.append(request)
        return requests
    def parse(self, response):
        """Decode the JSON body and print '<QId>_<Summary>' for each question."""
        jsonBody = json.loads(response.body)
        QAnswer = jsonBody['Data']['QAList']
        for dz in QAnswer:
            myquestion = dz.get('Question').get('Summary')
            myqid = dz.get('Question').get('QId')
            print( myqid + '_' + myquestion)
        # myquestion = QAnswer.get('QuestionUser').get('Question').get('Content')
        # print('-----------------------------------------------------------------')
        # print(myquestion)
        # print('-----------------------------------------------------------------')
        # answer_data = QAnswer.get('AnswerUserList')
        # for dz in answer_data:
        #     myanswer = dz.get('Answer').get('Content')
        #     print(myanswer)
|
[
"noreply@github.com"
] |
eyelc.noreply@github.com
|
eda3e0618143dbf66b6161035250d455fd62c200
|
1261bc255ed3df9ed760e1fa70ad58dbc4e52c30
|
/0x0B-python-input_output/1-number_of_lines.py
|
0e518a76aca688a95e4a7fc524991eb8fc8a5671
|
[] |
no_license
|
MIlenaMontoya/holbertonschool-higher_level_programming
|
77ece3156d9c0490c69090665b79e1c16def02d1
|
9b6942b509bd32cd8f3570d23277404631096e7d
|
refs/heads/master
| 2023-03-07T03:08:27.689032
| 2021-02-10T04:40:48
| 2021-02-10T04:40:48
| 291,843,392
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 300
|
py
|
#!/usr/bin/python3
""" Docstring """
def number_of_lines(filename=""):
    """Return the number of lines in a text file.

    Args:
        filename (str, optional): path of the file to read. Defaults to "".

    Returns:
        int: how many lines the file contains.
    """
    with open(filename, encoding="utf-8") as stream:
        # Counting via sum() over the line iterator avoids loading the
        # whole file into memory.
        return sum(1 for _ in stream)
|
[
"1852@holbertonschool.com"
] |
1852@holbertonschool.com
|
cfb23408c50eb12cb0018c0b1ec33e872eb10939
|
945c892bb51681a56dc6a4dd850a712d0a4f21ed
|
/deepaudio/speaker/criterion/pyannote_aamsoftmax/aamsoftmax.py
|
0e3ddda1d0e7e4d59e4191bd0cfd0bd0166f04de
|
[] |
no_license
|
zycv/deepaudio-speaker
|
ac3a465ef2283287af206e987a7d01296e6fb744
|
d4fd1e2ee294997b2a12245d86a88e808dc6fc7a
|
refs/heads/main
| 2023-08-26T07:36:31.976990
| 2021-11-12T10:00:21
| 2021-11-12T10:00:21
| 420,604,779
| 0
| 0
| null | 2021-10-31T11:31:43
| 2021-10-24T06:23:12
|
Python
|
UTF-8
|
Python
| false
| false
| 3,120
|
py
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from omegaconf import DictConfig
from .. import register_criterion
from .configuration import PyannoteAAMSoftmaxConfigs
class ArcLinear(nn.Module):
    """Additive Angular Margin (ArcFace-style) classification head.

    Parameters
    ----------
    nfeat : int
        Embedding dimension.
    nclass : int
        Number of classes.
    margin : float
        Angular margin added between embeddings and their class centers.
    scale : float
        Multiplicative factor applied to the output logits.
    """

    def __init__(self, nfeat, nclass, margin, scale):
        super(ArcLinear, self).__init__()
        eps = 1e-4
        # Keep cosines strictly inside (-1, 1) so acos stays finite.
        self.min_cos = eps - 1
        self.max_cos = 1 - eps
        self.nclass = nclass
        self.margin = margin
        self.scale = scale
        self.W = nn.Parameter(Tensor(nclass, nfeat))
        nn.init.xavier_uniform_(self.W)

    def forward(self, x, target=None):
        """Apply the angular-margin transformation.

        Parameters
        ----------
        x : `torch.Tensor`
            Batch of embeddings, shape (batch, nfeat).
        target : `torch.Tensor`
            Integer class labels (not one-hot), one per embedding.

        Returns
        -------
        `torch.Tensor`
            Scaled logits with the margin applied to each true class.
        """
        # L2-normalize embeddings and class-center weights.
        embeddings = F.normalize(x)
        centers = F.normalize(self.W)
        labels = target.long().view(-1, 1)
        # cos(theta_j): similarity of every embedding to every class center.
        cosines = torch.matmul(embeddings, torch.transpose(centers, 0, 1))
        # Cosine for the ground-truth class of each sample.
        true_cos = cosines.gather(1, labels)
        # Clamp for numerical stability before acos.
        true_cos = true_cos.clamp(min=self.min_cos, max=self.max_cos)
        # Recover the angle, add the margin, and go back to cosine space.
        true_angle = torch.acos(true_cos)
        penalized_cos = torch.cos(true_angle + self.margin)
        # One-hot mask selecting each sample's true class column.
        one_hot = torch.zeros_like(cosines)
        one_hot.scatter_(1, labels, 1.0)
        # Replace the true-class cosine with its penalized value, then scale.
        return self.scale * (cosines + one_hot * (penalized_cos - true_cos))
@register_criterion("pyannote_aamsoftmax", dataclass=PyannoteAAMSoftmaxConfigs)
class PyannoteAAMSoftmax(nn.Module):
    """Additive Angular Margin softmax loss for speaker embeddings.

    Wraps ArcLinear with LogSoftmax + NLLLoss, which together compute
    cross-entropy over the margin-penalized logits.
    """
    def __init__(self,
                 configs: DictConfig,
                 num_classes: int,
                 embedding_size: int
                 ) -> None:
        super(PyannoteAAMSoftmax, self).__init__()
        self.configs=configs
        # NOTE(review): `embedding_size` is accepted but never used; the
        # embedding dimension is read from configs.model.embed_dim instead.
        # Confirm whether the two are expected to match.
        self.classifier_ = ArcLinear(
            nfeat=self.configs.model.embed_dim,
            nclass=num_classes,
            margin=configs.criterion.margin,
            scale=configs.criterion.scale
        )
        self.logsoftmax_ = nn.LogSoftmax(dim=1)
        self.loss_ = nn.NLLLoss()
    def forward(self, embeddings: Tensor, targets: Tensor) -> Tensor:
        """Return the NLL loss over margin-penalized log-probabilities."""
        logits = self.logsoftmax_(self.classifier_(embeddings, target=targets))
        return self.loss_(logits, targets)
|
[
"yinruiqing110@gmail.com"
] |
yinruiqing110@gmail.com
|
1b35c07f508e1d8555d44da1605ec5ab4e46b1f3
|
f1bab6ed438915aae4786ea16542bfc53fc44f44
|
/workshop.py
|
afc3f08b388995ea0e1dc803b82afdd3e2f1e289
|
[] |
no_license
|
kamonchat26/workshop
|
aefefb1ca70f529eecd17f615fb1dcea88b4844a
|
19bb20f7f5947a42e6269abe00254de442d518de
|
refs/heads/master
| 2023-02-10T08:36:39.894528
| 2021-01-09T05:10:17
| 2021-01-09T05:10:17
| 328,075,154
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 64
|
py
|
# Introductory workshop script: prints a greeting and author details.
print("Hello world")
print("Name : kamonchat")
print("Age : 22")
|
[
"kamonchat2607@gmail.com"
] |
kamonchat2607@gmail.com
|
8a784fbde755f1cd3ed6366feb3e084114970342
|
3a76cc5ecfcfb7292236e3a49ee7e06753da5504
|
/awwa/tests.py
|
b7365e460c8a7e26e7ce919c98618d3006541a0f
|
[
"MIT"
] |
permissive
|
Emmanuel-otieno/Awward_clone
|
5e0a4624f990448bc61bc68289c938e4c4266c2e
|
ce0fb841984cae619599b51600403d7a1d873fc8
|
refs/heads/main
| 2023-02-25T03:09:29.687058
| 2021-01-26T04:46:46
| 2021-01-26T04:46:46
| 331,907,321
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,060
|
py
|
from django.test import TestCase
from .models import Project
from django.contrib.auth.models import User
# Create your tests here.
class ProjectTestClass(TestCase):
    '''
    Test case for the Project class and it's behaviours
    '''

    def setUp(self):
        """Create one fully-populated Project shared by every test.

        Fixes in this rewrite: the original defined setUp twice (the second
        silently replaced the first), referenced an undefined ``Projects``
        model, and returned the instance instead of assigning it, so
        ``self.Chi_Gallery`` was never set and every test raised.
        """
        self.Chi_Gallery = Project.objects.create(
            project_name='Chi_Gallery',
            project_photo='tree.png',
            description='some description',
            github_repo='git repo',
            url='tree.com',
            uploader='chinchillah',
        )

    # Testing instance
    def test_instance(self):
        self.assertTrue(isinstance(self.Chi_Gallery, Project))

    # Testing save method
    def test_save_method(self):
        self.Chi_Gallery.save_project()
        projects = Project.objects.all()
        self.assertTrue(len(projects) > 0)

    def test_project_save(self):
        """Replaces the broken ``projectSave`` (it asserted on an undefined
        name ``save`` and, lacking the test_ prefix, was never collected).
        Verifies the fixture created in setUp was persisted."""
        self.assertTrue(Project.objects.count() > 0)
|
[
"sakoemmanuel4@gmail.com"
] |
sakoemmanuel4@gmail.com
|
bad2e9b0b94246a6809cee2a530e2dbc8cbbd7fe
|
9b32d75490355e69d97008584c702cab66a221da
|
/payments/admin.py
|
349e545a164e71bdee49e92fdb4b37618aacc16d
|
[] |
no_license
|
cferreiras/mexican-delivery
|
1c37d798d262d8763a183809cd2e4275d64ab33f
|
54f0af0f53a14c2b0a54024225c916fc0400c265
|
refs/heads/master
| 2023-09-06T03:44:40.837527
| 2021-11-05T22:59:24
| 2021-11-05T22:59:24
| 406,971,333
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 510
|
py
|
from django.contrib import admin
from .models import Payment
@admin.register(Payment)
class PaymentAdmin(admin.ModelAdmin):
    """Django admin configuration for Payment records."""
    # Change-list columns: model identity plus Mercado Pago transaction
    # metadata and audit timestamps.
    list_display = [
        "__str__",
        "order",
        "doc_number",
        "email",
        "transaction_amount",
        "mercado_pago_id",
        "mercado_pago_status",
        "mercado_pago_status_detail",
        "created",
        "modified",
    ]
    # Sidebar filters and the fields covered by the admin search box.
    list_filter = ["mercado_pago_status", "modified"]
    search_fields = ["doc_number", "email", "mercado_pago_id"]
|
[
"caio.ferreiras@icloud.com"
] |
caio.ferreiras@icloud.com
|
0240e5bee41ad6c47695c4c80efebd23471e954e
|
144b09ebc05ab1f157c680a5237055b57861af82
|
/Warehouse/Inventory.py
|
b0ea9530dcd4671ea7463d475970181862734aa6
|
[
"MIT"
] |
permissive
|
upandacross/warehouse_optimization
|
98ce101ed845fca7a044137fc11f9754112a8a58
|
c1bae87bcb3371a7073d75e147322b4aee459f6a
|
refs/heads/master
| 2020-06-01T03:56:41.364403
| 2019-06-22T02:56:10
| 2019-06-22T02:56:10
| 190,624,578
| 6
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,024
|
py
|
from collections import defaultdict
from Warehouse.Bin import Bin
class Inventory:
    '''
    A singleton-style class: all state lives in the class-level __stock
    mapping, so every Inventory() instance views the same inventory.

    NOTE(review): the original docstring claimed custom __getattr__ /
    __setattr__ methods; none are defined below -- that description
    appears stale.

    There are no arguments to __init__.

    Assumptions:
        each bin location contains only one item and, therefore each location occurs once in the inventory
    '''
    __stock = defaultdict(set)  # Inventory.__stock[item] = locations that can be passed to Bin.get_bin_by_location(location)
    def __init__(self):
        # All state is class-level; nothing per-instance to initialize.
        pass
    @classmethod
    def clear(cls):
        """Reset the shared inventory to empty."""
        cls.__stock = defaultdict(set) # cls.__stock[item] = location
    @classmethod
    def update_bin(cls, location, item_no, qty):
        '''
        add or subtract qty in bin at location
        if item being updated != item in bin at location, bin.__item, bin.__count = item_no, qty
        Assumption:
            if called to update bin with new item, then all bins with old item will be replaced everywhere.
        Use Bin.__stock_bin to update individual bins but be sure to only update bin instances that live in Inventory
        '''
        assert isinstance(location, Bin.Bin_Location), 'location SBE instance of Bin.Bin_Location, is {}'.format(location.__class__)
        assert isinstance(qty, int), 'qty mst be int'
        assert isinstance(item_no, int) and item_no > 0, 'item_no must be int > 0'
        b = Bin.get_bin_by_location(location)
        if b is None:
            # No bin registered at this location yet; create one.
            b = Bin(rack_no=location.rack, side=location.side, bin_no=location.bin_no)
        if b.item != item_no:
            # Location is changing items: drop it from the old item's set.
            # NOTE(review): bare except hides real errors, and set.discard
            # never raises for a missing element -- this try may be unneeded.
            try:
                cls.__stock[item_no].discard(location)
            except:
                pass
        b.stock_bin(item_no, qty)
        cls.__stock[item_no].add(b.location)
        pass
    @classmethod
    def get_location_bin(cls, location):
        """Return the Bin registered at *location* (or None if absent)."""
        assert isinstance(location, Bin.Bin_Location), 'location must be an Bin.Bin_Location, is {}'.format(location)
        b = Bin.get_bin_by_location(location)
        return b
    @classmethod
    def get_stock_qty(cls, item_no=None, location=None):
        '''
        First, if location is not None return (bin.item, bin.count) at location,
        otherwise check item_no is not None.
        If item_no is not None, return the quantity of item_no across all locations.
        If both item_no and location are None, or both are not None: error.
        '''
        assert item_no is not None or location is not None, 'either item_no or location are not None'
        assert item_no is None or location is None, \
               'either item_no or location are not None, NOT both'
        if location is not None:
            b = Bin.get_bin_by_location(location)
            return (b.item, b.count) # it is caller's responsibility to check b.item == item_no
        elif item_no is None or item_no not in cls.__stock: # item_no is not None by assertion above
            return (item_no, 0)
        else:
            # Sum the counts of every bin currently holding this item.
            return (item_no, sum([Bin.get_bin_by_location(loc).count for loc in cls.__stock[item_no]]))
    def __repr__(self):
        # Total quantity = sum over every item of the counts in its bins.
        if len(self.stock.values()) > 0:
            qty = sum(
                 [sum(Bin.get_bin_by_location(loc).count for loc in Inventory.__stock[itm]
                     )
                  for itm in Inventory.__stock.keys()
                 ]
            )
        else:
            qty = 0
        return 'Inventory: {:,d} items, {:,d} total quantity'\
                .format(len(self.stock), qty)
    def __str__(self):
        return Inventory.__repr__(self)
    @property
    def stock(self):
        # Read-only view of the shared class-level stock mapping.
        return type(self).__stock
    @stock.setter
    def stock(self, args):
        # Direct assignment is forbidden: mutate via update_bin() instead.
        raise RuntimeError('stock is maintained in inventory via Bin stocking')
|
[
"1205122+upandacross@users.noreply.github.com"
] |
1205122+upandacross@users.noreply.github.com
|
11a2eaa85d8243be87c2e539d618f7ffa20e6b83
|
94d5ef47d3244950a0308c754e0aa55dca6f2a0e
|
/app/eduqa/__init__.py
|
c7dc8af6a68d35de502955b4d708b7ed63eba6f3
|
[] |
no_license
|
MUMT-IT/mis2018
|
9cbc7191cdc1bcd7e0c2de1e0586d8bd7b26002e
|
69fabc0b16abfeba44173caa93d4f63fa79033fd
|
refs/heads/master
| 2023-08-31T16:00:51.717449
| 2023-08-31T11:30:13
| 2023-08-31T11:30:13
| 115,810,883
| 5
| 5
| null | 2023-09-14T10:08:35
| 2017-12-30T17:06:00
|
HTML
|
UTF-8
|
Python
| false
| false
| 90
|
py
|
from flask import Blueprint
# Blueprint for the educational QA (eduqa) section of the application.
eduqa_bp = Blueprint('eduqa', __name__)
# Imported after the blueprint exists (standard Flask pattern, so the
# views module can import eduqa_bp without a circular import).
from . import views
|
[
"likit.pre@mahidol.edu"
] |
likit.pre@mahidol.edu
|
e0c8e6ddc179d4521efdc223a7e389df6781749b
|
45883bbc0f6f23e01dc70cb4077800434994e78b
|
/docs/extensions/index.py
|
7565e0b49c15e68d89246aad60b8bc519fc62599
|
[
"DOC",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
ddavila0/htcondor
|
08441707e6482e0d92ee189a26324a77761be425
|
8835cc3bacc998d82841d14e962165cb5d3c2ee4
|
refs/heads/master
| 2023-08-27T11:11:13.453792
| 2021-08-02T23:13:14
| 2021-08-02T23:13:14
| 313,687,412
| 0
| 0
|
Apache-2.0
| 2020-11-20T22:55:19
| 2020-11-17T17:04:54
|
C++
|
UTF-8
|
Python
| false
| false
| 1,665
|
py
|
import os
import sys
from docutils import nodes, utils
from docutils.parsers.rst import Directive
from sphinx import addnodes
from sphinx.errors import SphinxError
from sphinx.util.nodes import split_explicit_title, process_index_entry, \
set_role_source_info
def dump(obj):
for attr in dir(obj):
print("obj.%s = %r" % (attr, getattr(obj, attr)))
def index_role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
    # type: (unicode, unicode, unicode, int, Inliner, Dict, List[unicode]) -> Tuple[List[nodes.Node], List[nodes.Node]]  # NOQA
    """Custom ``:index:`` role: record an index entry and emit an invisible
    link target at the role's position.

    NOTE(review): options/content use mutable default arguments; they are
    not mutated here, but the defaults are shared across calls.
    """
    # create new reference target with a document-unique id
    env = inliner.document.settings.env
    targetid = 'index-%s' % env.new_serialno('index')
    targetnode = nodes.target('', '', ids=[targetid])
    # split text and target in role content
    has_explicit_title, title, target = split_explicit_title(text)
    title = utils.unescape(title)
    target = utils.unescape(target)
    # if an explicit target is given, we can process it as a full entry
    if has_explicit_title:
        entries = process_index_entry(target, targetid)
    # otherwise we just create a "single" entry
    else:
        # but allow giving main entry via a leading '!'
        main = ''
        if target.startswith('!'):
            target = target[1:]
            title = title[1:]
            main = 'main'
        entries = [('single', target, targetid, main, None)]
    indexnode = addnodes.index()
    indexnode['entries'] = entries
    set_role_source_info(inliner, lineno, indexnode)  # type: ignore
    # A single-space text node follows the target in the rendered output.
    textnode = nodes.Text(" ", " ")
    return [indexnode, targetnode, textnode], []
def setup(app):
    """Sphinx extension entry point: register the custom ``index`` role."""
    app.add_role("index", index_role)
|
[
"coatsworth@cs.wisc.edu"
] |
coatsworth@cs.wisc.edu
|
bf67858f1f83c56485831f8b94b83dc6d25a7ea9
|
7f3cbde8b31cc7ef064b303de54807f59ea0d3c8
|
/Algorithms/Warmup/a_very_big_sum.py
|
caace756e8b2e04ad881f5fd9e99bbe6db07df21
|
[] |
no_license
|
Marlysson/HackerRank
|
7f9ea6a04cd7b97ba5c43c5e321b219511a64106
|
98e65be30d8e6f70ca75676441dc9b1fd7fcac1b
|
refs/heads/master
| 2020-04-06T04:28:43.522255
| 2016-10-07T04:18:06
| 2016-10-07T04:18:06
| 55,646,873
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 573
|
py
|
# -*- coding : utf-8 -*-
#numbers of large numbers to sum
#Challenge : https://www.hackerrank.com/challenges/a-very-big-sum
n = int(input())
numbers_str = str(input())
numbers = []
def to_list(string_numbers):
    """Split *string_numbers* into tokens, treating every punctuation
    character as whitespace."""
    from string import punctuation
    # One translate pass replaces the original per-character replace loop.
    table = str.maketrans(punctuation, " " * len(punctuation))
    return string_numbers.translate(table).strip().split()
def convert(lista):
    """Lazily convert every element of *lista* to int (iterator, like map)."""
    return (int(value) for value in lista)
def sum_numbers(numbers):
    """Return the sum of an iterable of ints (Python ints never overflow)."""
    return sum(numbers)
numbers = to_list(numbers_str)
converted = convert(numbers)
soma = sum_numbers(converted)
print(soma)
|
[
"marlysson5@gmail.com"
] |
marlysson5@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.