blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
620b6dda3cf88205a7c9f1e46efff99abe37eb7d | 256728286889a60e5d8896efc6869483daba3280 | /cinemanio/sites/imdb/migrations/0001_initial.py | 1f14d2c4b9e565d481ff8bf0acc5215f8e05d89a | [
"MIT"
] | permissive | cinemanio/backend | 5236be94d08ec79b9fc8d8973aee93ec8fad9b1b | c393dc8c2d59dc99aa2c3314d3372b6e2bf5497f | refs/heads/master | 2021-05-01T13:02:08.102705 | 2019-11-10T14:33:37 | 2019-11-10T14:33:37 | 121,069,149 | 4 | 0 | MIT | 2020-02-12T00:09:03 | 2018-02-11T01:00:31 | Python | UTF-8 | Python | false | false | 1,273 | py | # Generated by Django 2.0.1 on 2018-01-26 01:06
import cinemanio.sites.imdb.models
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the IMDb site integration.

    Creates ImdbMovie and ImdbPerson: one-to-one links from the core
    Movie/Person records to their IMDb ids (ImdbMovie also stores the
    IMDb rating).
    """

    initial = True

    dependencies = [
        ('core', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='ImdbMovie',
            fields=[
                # The IMDb numeric id doubles as the primary key.
                ('id', models.PositiveIntegerField(primary_key=True, serialize=False, verbose_name='IMDb ID')),
                ('rating', models.FloatField(blank=True, db_index=True, null=True, verbose_name='IMDb rating')),
                # Deleting the core Movie deletes its IMDb link as well.
                ('movie', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='imdb', to='core.Movie')),
            ],
            # UrlMixin comes from cinemanio.sites.imdb.models (presumably URL helpers -- confirm).
            bases=(models.Model, cinemanio.sites.imdb.models.UrlMixin),
        ),
        migrations.CreateModel(
            name='ImdbPerson',
            fields=[
                ('id', models.PositiveIntegerField(primary_key=True, serialize=False, verbose_name='IMDb ID')),
                ('person', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='imdb', to='core.Person')),
            ],
            bases=(models.Model, cinemanio.sites.imdb.models.UrlMixin),
        ),
    ]
| [
"ramusus@gmail.com"
] | ramusus@gmail.com |
7c7221fa64006ffeb187d80c5c61089cad20c41f | 04b3d872caa5c057642b2b6ed672675e6c58ac53 | /yelp_vch_scraper/yelp_vch_scraper/middlewares.py | 0cfc3fe6a04fe86f936d968c39e9750d68c49b4d | [] | no_license | brandontkessler/scrapers | 4a6b59321125e73fc9c8660d00cea1c199a7eb1a | 413d9b3d3b06b8cc7955cde117590b6b7fc0f699 | refs/heads/master | 2022-11-06T18:16:11.812766 | 2019-07-30T19:22:34 | 2019-07-30T19:22:34 | 199,532,189 | 0 | 0 | null | 2022-11-04T19:35:40 | 2019-07-29T22:01:07 | Python | UTF-8 | Python | false | false | 3,613 | py | # -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class YelpVchScraperSpiderMiddleware(object):
    """Spider middleware hooks for the yelp_vch_scraper project.

    Scrapy only invokes hooks that are defined; every hook below keeps
    the framework's default pass-through behaviour, and the middleware
    additionally logs when a spider is opened.
    """

    @classmethod
    def from_crawler(cls, crawler):
        """Build the middleware and subscribe to the spider_opened signal."""
        middleware = cls()
        crawler.signals.connect(middleware.spider_opened, signal=signals.spider_opened)
        return middleware

    def process_spider_input(self, response, spider):
        """Accept every response on its way into the spider (no filtering)."""
        return None

    def process_spider_output(self, response, result, spider):
        """Forward the spider's results (requests/items) unchanged."""
        yield from result

    def process_spider_exception(self, response, exception, spider):
        """Ignore spider exceptions; returning None lets other middleware handle them."""
        return None

    def process_start_requests(self, start_requests, spider):
        """Forward the spider's start requests untouched."""
        yield from start_requests

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
class YelpVchScraperDownloaderMiddleware(object):
    """Downloader middleware hooks for the yelp_vch_scraper project.

    All hooks keep Scrapy's default pass-through behaviour; the class
    only adds a log line when a spider is opened.
    """

    @classmethod
    def from_crawler(cls, crawler):
        """Build the middleware and subscribe to the spider_opened signal."""
        middleware = cls()
        crawler.signals.connect(middleware.spider_opened, signal=signals.spider_opened)
        return middleware

    def process_request(self, request, spider):
        """Let every outgoing request continue down the middleware chain."""
        return None

    def process_response(self, request, response, spider):
        """Return the downloaded response unchanged."""
        return response

    def process_exception(self, request, exception, spider):
        """Defer download exceptions to the remaining middleware (None)."""
        return None

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
| [
"brandontkessler@gmail.com"
] | brandontkessler@gmail.com |
908a7f137c7d77f79dcb503a8a9d2dda08602255 | 68072366b67058780f8a629b5a8fcebdfbec1590 | /test/test_config.py | b0b51f77970482c314336dc1c2f2da8a7f6b56f4 | [
"MIT"
] | permissive | markdrago/banter | fb6bf0aea3ac8e9dc8c82790911e09fbfd1f9d77 | 5d57d109bf9c3535750cfb845f08ccc861e38047 | refs/heads/master | 2021-01-19T19:31:19.024415 | 2015-11-09T13:18:10 | 2015-11-09T13:18:10 | 7,073,738 | 2 | 2 | null | 2015-11-07T16:41:49 | 2012-12-09T00:09:15 | Python | UTF-8 | Python | false | false | 2,995 | py | import unittest
from mock import Mock
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
from banter import config
class TestConfig(unittest.TestCase):
    """Unit tests for banter.config.Config: value lookup, persistence and
    dict conversion."""

    def test_get_value(self):
        """A stored option is returned verbatim."""
        contents = "[auth]\n"
        contents += "token = mdrago:102:abcdefghijklmnopqrstuvwxyz\n\n"
        c = self.get_config_with_contents(contents)
        expected = "mdrago:102:abcdefghijklmnopqrstuvwxyz"
        self.assertEqual(expected, c.get_value('auth', 'token'))

    def test_get_value_when_value_not_present(self):
        """A missing option in an existing section yields None."""
        contents = "[auth]\n"
        c = self.get_config_with_contents(contents)
        self.assertIsNone(c.get_value('auth', 'token'))

    def test_get_value_when_section_not_present(self):
        """A missing section yields None instead of raising."""
        contents = "[not_auth]\n"
        c = self.get_config_with_contents(contents)
        self.assertIsNone(c.get_value('auth', 'token'))

    def test_set_value_and_save_overwrites_existing_value(self):
        """set_value followed by save_fp writes the new value, not the old one."""
        # setup a primed config object
        contents = "[auth]\n"
        contents += "token = mdrago:102:abcdefghijklmnopqrstuvwxyz\n\n"
        c = self.get_config_with_contents(contents)
        # create a stringIO object to receive the changes and set the new token
        newtoken = "mdrago:104:wheredidthealphabetgo"
        c.set_value('auth', 'token', newtoken)
        fp = StringIO()
        c.save_fp(fp)
        filecontents = fp.getvalue()
        fp.close()
        # verify that the new token was written
        expected = "[auth]\n"
        expected += "token = mdrago:104:wheredidthealphabetgo\n\n"
        self.assertEqual(expected, filecontents)

    @staticmethod
    def get_config_with_contents(contents):
        """Helper: build a Config pre-loaded from an in-memory file."""
        buffer = StringIO(contents)
        c = config.Config()
        c.load_from_file_pointer(buffer)
        buffer.close()
        return c

    def test_load_from_file_calls_config_parser_read(self):
        """load_from_file delegates to the underlying parser's read()."""
        c = config.Config()
        c.parser = Mock()
        c.filename = '/tmp/configfile'
        c.load_from_file()
        c.parser.read.assert_called_with('/tmp/configfile')

    def test_as_dict(self):
        """as_dict groups values by section into a nested dict."""
        c = config.Config()
        c.set_value('sec1', 'key1', 'val1')
        c.set_value('sec1', 'key2', 'val2')
        c.set_value('sec2', 'key3', 'val3')
        expected = {
            'sec1': {
                'key1': 'val1',
                'key2': 'val2'
            },
            'sec2': {
                'key3': 'val3'
            }
        }
        self.assertEqual(expected, c.as_dict())

    # BUG FIX: this method was previously named `set_from_dict` (missing the
    # `test_` prefix), so unittest/pytest discovery never executed it.
    def test_set_from_dict(self):
        """set_from_dict loads all sections and keys from a nested dict."""
        c = config.Config()
        replacement = {
            'sec1': {
                'key1': 'val1',
                'key2': 'val2'
            },
            'sec2': {
                'key3': 'val3'
            }
        }
        c.set_from_dict(replacement)
        self.assertEqual('val1', c.get_value('sec1', 'key1'))
        self.assertEqual('val2', c.get_value('sec1', 'key2'))
        self.assertEqual('val3', c.get_value('sec2', 'key3'))
| [
"markdrago@gmail.com"
] | markdrago@gmail.com |
0b9fdafd9012da3e7de11c97acde26a00ae5d363 | 91502409e1de21daca0ac771290acb1e840f298f | /src/Transformer-TTS/transformer.py | da18861a417b959b3b8cda76aa017327b6665c56 | [] | no_license | ishine/MultispeakerTTS | d759db15604b9ebe3d8bedba27de99c849618d17 | f0267faf57c9a6c0cdade88d01b25f4d82dae85a | refs/heads/master | 2021-04-13T03:34:15.285892 | 2020-03-02T01:32:36 | 2020-03-02T01:32:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,182 | py | import tensorflow as tf
from tensorflow.keras import layers
class Transformer(tf.keras.Model):
    """Encoder-decoder Transformer ("Attention Is All You Need").

    NOTE(review): ``Encoder`` and ``Decoder`` are referenced but neither
    defined nor imported in this module -- they must be supplied by the
    importing scope. Confirm against the rest of the project.
    """

    def __init__(self, num_layers, d_model, num_heads, dff, input_vocab_size,
                 target_vocab_size, pe_input, pe_target, rate=0.1):
        super(Transformer, self).__init__()
        # Encoder stack over the source vocabulary.
        self.encoder = Encoder(num_layers, d_model, num_heads, dff,
                               input_vocab_size, pe_input, rate)
        # Decoder stack attending over the encoder output.
        self.decoder = Decoder(num_layers, d_model, num_heads, dff,
                               target_vocab_size, pe_target, rate)
        # Projects decoder features onto target-vocabulary logits.
        self.final_layer = tf.keras.layers.Dense(target_vocab_size)

    def call(self, inp, tar, training, enc_padding_mask,
             look_ahead_mask, dec_padding_mask):
        """One forward pass; returns (logits, attention_weights)."""
        encoded = self.encoder(inp, training, enc_padding_mask)  # (batch_size, inp_seq_len, d_model)
        # decoded.shape == (batch_size, tar_seq_len, d_model)
        decoded, attention_weights = self.decoder(
            tar, encoded, training, look_ahead_mask, dec_padding_mask)
        logits = self.final_layer(decoded)  # (batch_size, tar_seq_len, target_vocab_size)
        return logits, attention_weights
| [
"zakwwebb@gmail.com"
] | zakwwebb@gmail.com |
a9735c0896569ca1a2fa0992ca72325452c956bb | 6006b0452379166d5e4bc085227c8351bedd264a | /Tacotron/madeGspeech/record.py | b2aaa3d6991963d2619bb1f61c50cb045177b353 | [] | no_license | firstcoding17/Tacotron | a0cd7d764d368afa71e820d1dda66f60b58590a7 | 234f739b1ab8fc55bf974679075c52d01e58fdbb | refs/heads/master | 2023-07-25T08:40:48.849708 | 2019-12-23T18:17:52 | 2019-12-23T18:17:52 | 184,554,512 | 0 | 0 | null | 2023-07-06T21:20:42 | 2019-05-02T09:30:04 | Jupyter Notebook | UTF-8 | Python | false | false | 1,670 | py | ##녹음
import pyaudio
import wave
import time
# Audio recorder: captures RECORD_SEC seconds from the selected input
# device with PyAudio and writes them to a timestamped WAV file.
RATE = 48000                      # sampling rate in Hz
CHANNELS = 2                      # stereo capture
#
FORMAT = pyaudio.paInt16          # 16-bit signed samples
DEVICE_INDEX = 2                  # input device index -- machine specific
CHUNK = 1024                      # frames per buffer
SAMPLING_PER_SEC = RATE // CHUNK  # full-size buffer reads per second
RECORD_SEC = 5                    # recording length in seconds
pa = pyaudio.PyAudio()
try:
    if pa.is_format_supported(RATE, input_device=DEVICE_INDEX, input_channels=CHANNELS, input_format=FORMAT):
        # input stream object (opened paused via start=False; started explicitly below)
        stream = pa.open(rate=RATE, channels=CHANNELS, format=FORMAT, input=True,
                         input_device_index=DEVICE_INDEX, frames_per_buffer=CHUNK, start=False)
    else:
        print("지원하지 않는 형식입니다.")
        pa.terminate()
        exit()
except:
    # NOTE(review): this bare except hides the real error, and because
    # exit() raises SystemExit, the unsupported-format branch above also
    # falls through into this handler; consider narrowing the exception.
    print("샘플링 속도 또는 장치 설정 문제입니다.")
    pa.terminate()
    exit()
frames=[]
stream.start_stream()
print("녹음 시작")
start_t = time.time()
try:
    # Only half of the maximum buffer size is read per call, so the
    # iteration count is doubled to cover RECORD_SEC seconds.
    for i in range(int(SAMPLING_PER_SEC*RECORD_SEC*2)):
        # By default the data is produced in little-endian byte order.
        string_data = stream.read(CHUNK//2, exception_on_overflow=True)
        frames.append(string_data)
except IOError as e:
    # Buffer overflow: abort the recording and release the device.
    print("오버 플로우")
    stream.close()
    pa.terminate()
    exit()
print("녹음 시간",time.time()-start_t)
print("녹음 종료")
stream.stop_stream()
stream.close()
pa.terminate()
# File name carries minute and second of the local time to avoid clashes.
now = time.localtime()
w = wave.open("record_"+str(now.tm_min)+'_'+str(now.tm_sec)+'.wav', 'wb')
w.setnchannels(CHANNELS)
w.setsampwidth(pa.get_sample_size(FORMAT))
w.setframerate(RATE)
w.writeframes(b''.join(frames))
w.close()
"noreply@github.com"
] | noreply@github.com |
6216f8236cd09f608eacb70d7507bc0ac3a525f7 | b4491eb4aea6bb8620373f4c1cdb61639f56bb75 | /lesson_7/task_2.py | 360e34748b2eb78813b6611aca932ecfd1d03ab6 | [] | no_license | ApolloNick/Memory_holder_for_Hillel | 92643a2fa11b726b218f2f707c302807352dbeb7 | 12df12fb4e467efd2becf44fb0771f37014c8be7 | refs/heads/main | 2023-07-05T07:30:27.417183 | 2021-08-22T19:20:31 | 2021-08-22T19:20:31 | 374,915,594 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 737 | py | def convertor_for_temperatures(amount_of_degrees: int, type_of_system: str):
if type_of_system == "Celsius":
value_kelvin = amount_of_degrees + 273.15
value_fahrenheit = float(amount_of_degrees) * 9.0 / 5.0 + 32
return value_kelvin, value_fahrenheit
elif type_of_system == "Kelvin":
value_celsius = amount_of_degrees - 273.15
value_fahrenheit = value_celsius * 9.0/5.00 + 32
return value_celsius, value_fahrenheit
elif type_of_system == "Fahrenheit":
value_celsius = (amount_of_degrees - 32) * 5.0/9.0
value_kelvin = (amount_of_degrees - 32) * 5.0/9.0 + 273.15
return value_celsius, value_kelvin
else:
print("You entered invalid value")
| [
"nkostrov7@gmail.com"
] | nkostrov7@gmail.com |
c27f9949af47f0826b64a66cfbde19ef82a82f72 | efa326097d25e030666e75bf5a851e8c68fc62c2 | /code/heuristic-stats-parser.py | 0f421e1285d52ab3403d32f6a765e4d9aaf81293 | [] | no_license | mzumsteg/downward-lab | 940c4be2280bcffe4d6f907884a3a7ff01ef69c8 | e3ba6e6747a4ba60ba185c8184c80763825ec7bf | refs/heads/master | 2020-05-03T04:59:19.235859 | 2019-10-29T14:54:46 | 2019-10-29T14:54:46 | 178,437,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,380 | py | #! /usr/bin/env python
import re
import logging
from lab.parser import Parser
class MultiPattern:
    """Regex pattern that collects *every* match in a file.

    Each match is transformed by *mapper*, and ``search`` returns all
    mapped values as a list under the configured attribute name.
    """

    def __init__(self, attribute, regex, mapper, required=False, flags=''):
        self.attribute = attribute
        self.mapper = mapper
        self.required = required
        # Translate single-character flag names (e.g. 'M', 'S') into the
        # corresponding re module constants; unknown letters are reported
        # and skipped.
        combined_flags = 0
        for letter in flags:
            try:
                combined_flags |= getattr(re, letter)
            except AttributeError:
                logging.critical('Unknown pattern flag: {}'.format(letter))
        self.regex = re.compile(regex, combined_flags)

    def search(self, content, filename):
        """Return {attribute: [mapper(match), ...]} for all matches in *content*."""
        found_items = [self.mapper(match) for match in self.regex.finditer(content)]
        if self.required and not found_items:
            logging.error('Pattern "%s" not found in %s' % (self, filename))
        return {self.attribute: found_items}

    def __str__(self):
        return self.regex.pattern
class HParser(Parser):
    """Lab parser extended with support for pre-built pattern objects."""

    def add_special_pattern(self, pattern, file='run.log'):
        """Register an already-constructed pattern (e.g. a MultiPattern)
        on the sub-parser responsible for *file*."""
        self.file_parsers[file].add_pattern(pattern)
print("Running heuristic information parser")
parser = HParser()
parser.add_special_pattern(MultiPattern("h_split_statistics",
r"Best heuristic value \(N = (\d+)\): (.+) \((\d+) distinct\)",
lambda m: [int(m.group(1)), float(m.group(2)), int(m.group(3))]))
parser.parse()
| [
"mar.zumsteg@stud.unibas.ch"
] | mar.zumsteg@stud.unibas.ch |
bfa7845a3715e92b22b02ae33fc01bfb05d211e5 | 29e82a7b9412b10600fb5c7638c0918e08af67d7 | /exps/algos/R_EA.py | bc3345bcc4569b24352b35198ea4b2200718e996 | [
"MIT"
] | permissive | chenmingTHU/NAS-Projects | faa2edccd821b0ae0876179a1b02e7872d4bd91e | f8f44bfb31ed50c7156f9125ba34e49159848fb7 | refs/heads/master | 2020-12-02T14:48:17.363203 | 2019-12-29T09:17:26 | 2019-12-29T09:17:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,177 | py | ##################################################
# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 #
##################################################################
# Regularized Evolution for Image Classifier Architecture Search #
##################################################################
import os, sys, time, glob, random, argparse
import numpy as np, collections
from copy import deepcopy
import torch
import torch.nn as nn
from pathlib import Path
lib_dir = (Path(__file__).parent / '..' / '..' / 'lib').resolve()
if str(lib_dir) not in sys.path: sys.path.insert(0, str(lib_dir))
from config_utils import load_config, dict2config, configure2str
from datasets import get_datasets, SearchDataset
from procedures import prepare_seed, prepare_logger, save_checkpoint, copy_checkpoint, get_optim_scheduler
from utils import get_model_infos, obtain_accuracy
from log_utils import AverageMeter, time_string, convert_secs2time
from nas_102_api import NASBench102API as API
from models import CellStructure, get_search_spaces
class Model(object):
    """A candidate architecture paired with its measured accuracy."""

    def __init__(self):
        # Both fields are filled in later by the evolution loop.
        self.arch = None
        self.accuracy = None

    def __str__(self):
        """Readable form: just the architecture."""
        return '{:}'.format(self.arch)
def valid_func(xloader, network, criterion):
    """Evaluate *network* on *xloader* without gradient tracking.

    Returns (mean loss, mean top-1 accuracy, mean top-5 accuracy).
    NOTE(review): the network is left in train() mode here, mirroring the
    original implementation -- confirm this is intended for evaluation.
    """
    data_meter, batch_meter = AverageMeter(), AverageMeter()
    loss_meter, top1_meter, top5_meter = AverageMeter(), AverageMeter(), AverageMeter()
    network.train()
    tick = time.time()
    with torch.no_grad():
        for batch_inputs, batch_targets in xloader:
            batch_targets = batch_targets.cuda(non_blocking=True)
            # measure data loading time
            data_meter.update(time.time() - tick)
            # prediction
            _, logits = network(batch_inputs)
            loss = criterion(logits, batch_targets)
            # record, weighted by batch size
            prec1, prec5 = obtain_accuracy(logits.data, batch_targets.data, topk=(1, 5))
            count = batch_inputs.size(0)
            loss_meter.update(loss.item(), count)
            top1_meter.update(prec1.item(), count)
            top5_meter.update(prec5.item(), count)
            # measure elapsed time for the whole batch
            batch_meter.update(time.time() - tick)
            tick = time.time()
    return loss_meter.avg, top1_meter.avg, top5_meter.avg
def train_and_eval(arch, nas_bench, extra_info):
    """Look up the pre-computed validation accuracy and time cost of *arch*.

    Returns (valid_acc, time_cost) from the NAS benchmark tables.
    Training a model from scratch (nas_bench is None) is not supported.
    """
    if nas_bench is None:
        # Training from scratch is not implemented.
        raise ValueError('NOT IMPLEMENT YET')
    arch_index = nas_bench.query_index_by_arch( arch )
    assert arch_index >= 0, 'can not find this arch : {:}'.format(arch)
    info = nas_bench.get_more_info(arch_index, 'cifar10-valid', None, True)
    # Accuracy after validation; cost = full training time + one validation pass.
    valid_acc = info['valid-accuracy']
    time_cost = info['train-all-time'] + info['valid-per-time']
    return valid_acc, time_cost
def random_architecture_func(max_nodes, op_names):
    """Return a nullary sampler that draws uniform-random cell architectures.

    Each node i (1..max_nodes-1) receives one randomly chosen operation
    for every predecessor node j < i.
    """
    def random_architecture():
        genotypes = []
        for node in range(1, max_nodes):
            edges = [(random.choice( op_names ), prev) for prev in range(node)]
            genotypes.append(tuple(edges))
        return CellStructure( genotypes )
    return random_architecture
def mutate_arch_func(op_names):
    """Build the mutation operator used by regularized evolution.

    The returned function deep-copies the parent architecture and swaps
    the operation on one uniformly chosen edge for a different randomly
    drawn one (re-drawing until the operation actually changes).
    """
    def mutate(parent_arch):
        child_arch = deepcopy( parent_arch )
        node_id = random.randint(0, len(child_arch.nodes)-1)
        edges = list( child_arch.nodes[node_id] )
        edge_id = random.randint(0, len(edges)-1)
        old_op, source = edges[edge_id]
        new_op = random.choice( op_names )
        while new_op == old_op:  # guarantee an actual change
            new_op = random.choice( op_names )
        edges[edge_id] = (new_op, source)
        child_arch.nodes[node_id] = tuple( edges )
        return child_arch
    return mutate
def regularized_evolution(cycles, population_size, sample_size, time_budget, random_arch, mutate_arch, nas_bench, extra_info):
    """Algorithm for regularized evolution (i.e. aging evolution).

    Follows "Algorithm 1" in Real et al. "Regularized Evolution for Image
    Classifier Architecture Search".

    Args:
      cycles: the number of cycles the algorithm should run for.
        NOTE: currently unused -- the cycle-count loop is commented out
        below and the search stops on time_budget instead.
      population_size: the number of individuals to keep in the population.
      sample_size: the number of individuals that should participate in each tournament.
      time_budget: the upper bound of searching cost (in seconds)

    Returns:
      history: a list of `Model` instances, representing all the models computed
        during the evolution experiment.
      total_time_cost: accumulated (simulated training + sampling) time.
    """
    population = collections.deque()
    history, total_time_cost = [], 0  # Not used by the algorithm, only used to report results.
    # Initialize the population with random models.
    while len(population) < population_size:
        model = Model()
        model.arch = random_arch()
        model.accuracy, time_cost = train_and_eval(model.arch, nas_bench, extra_info)
        population.append(model)
        history.append(model)
        total_time_cost += time_cost
    # Carry out evolution in cycles. Each cycle produces a model and removes
    # another.
    #while len(history) < cycles:
    while total_time_cost < time_budget:
        # Sample randomly chosen models from the current population.
        start_time, sample = time.time(), []
        while len(sample) < sample_size:
            # Inefficient, but written this way for clarity. In the case of neural
            # nets, the efficiency of this line is irrelevant because training neural
            # nets is the rate-determining step.
            candidate = random.choice(list(population))
            sample.append(candidate)
        # The parent is the best model in the sample (tournament selection).
        parent = max(sample, key=lambda i: i.accuracy)
        # Create the child model and store it.
        child = Model()
        child.arch = mutate_arch(parent.arch)
        # Sampling overhead counts against the budget too.
        total_time_cost += time.time() - start_time
        child.accuracy, time_cost = train_and_eval(child.arch, nas_bench, extra_info)
        if total_time_cost + time_cost > time_budget:  # return without exceeding the budget
            return history, total_time_cost
        else:
            total_time_cost += time_cost
        population.append(child)
        history.append(child)
        # Remove the oldest model (aging: FIFO, not worst-accuracy).
        population.popleft()
    return history, total_time_cost
def main(xargs, nas_bench):
    """Run one Regularized-Evolution search on CIFAR-10 within the time
    budget and report the best architecture found.

    Returns (log directory, benchmark index of the best architecture).
    NOTE(review): this function reads the module-level ``args`` in two
    places (``prepare_logger(args)`` and ``args.ea_fast_by_api``) instead
    of ``xargs`` -- it only works when invoked from the __main__ block.
    """
    assert torch.cuda.is_available(), 'CUDA is not available.'
    # Deterministic cuDNN setup so repeated runs are comparable.
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    torch.set_num_threads( xargs.workers )
    prepare_seed(xargs.rand_seed)
    logger = prepare_logger(args)
    assert xargs.dataset == 'cifar10', 'currently only support CIFAR-10'
    train_data, valid_data, xshape, class_num = get_datasets(xargs.dataset, xargs.data_path, -1)
    # Fixed train/validation split shipped with the benchmark.
    split_Fpath = 'configs/nas-benchmark/cifar-split.txt'
    cifar_split = load_config(split_Fpath, None, None)
    train_split, valid_split = cifar_split.train, cifar_split.valid
    logger.log('Load split file from {:}'.format(split_Fpath))
    config_path = 'configs/nas-benchmark/algos/R-EA.config'
    config = load_config(config_path, {'class_num': class_num, 'xshape': xshape}, logger)
    # To split data: the validation half reuses training images but with
    # the validation transforms.
    train_data_v2 = deepcopy(train_data)
    train_data_v2.transform = valid_data.transform
    valid_data = train_data_v2
    search_data = SearchDataset(xargs.dataset, train_data, train_split, valid_split)
    # data loader
    train_loader = torch.utils.data.DataLoader(train_data, batch_size=config.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(train_split) , num_workers=xargs.workers, pin_memory=True)
    valid_loader = torch.utils.data.DataLoader(valid_data, batch_size=config.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(valid_split), num_workers=xargs.workers, pin_memory=True)
    logger.log('||||||| {:10s} ||||||| Train-Loader-Num={:}, Valid-Loader-Num={:}, batch size={:}'.format(xargs.dataset, len(train_loader), len(valid_loader), config.batch_size))
    logger.log('||||||| {:10s} ||||||| Config={:}'.format(xargs.dataset, config))
    extra_info = {'config': config, 'train_loader': train_loader, 'valid_loader': valid_loader}
    search_space = get_search_spaces('cell', xargs.search_space_name)
    # Search operators: a uniform sampler and a single-edge mutator.
    random_arch = random_architecture_func(xargs.max_nodes, search_space)
    mutate_arch = mutate_arch_func(search_space)
    #x =random_arch() ; y = mutate_arch(x)
    logger.log('{:} use nas_bench : {:}'.format(time_string(), nas_bench))
    logger.log('-'*30 + ' start searching with the time budget of {:} s'.format(xargs.time_budget))
    # When ea_fast_by_api is set, accuracies are looked up in the benchmark
    # tables instead of being measured by actual training.
    history, total_cost = regularized_evolution(xargs.ea_cycles, xargs.ea_population, xargs.ea_sample_size, xargs.time_budget, random_arch, mutate_arch, nas_bench if args.ea_fast_by_api else None, extra_info)
    logger.log('{:} regularized_evolution finish with history of {:} arch with {:.1f} s.'.format(time_string(), len(history), total_cost))
    best_arch = max(history, key=lambda i: i.accuracy)
    best_arch = best_arch.arch
    logger.log('{:} best arch is {:}'.format(time_string(), best_arch))
    info = nas_bench.query_by_arch( best_arch )
    if info is None: logger.log('Did not find this architecture : {:}.'.format(best_arch))
    else : logger.log('{:}'.format(info))
    logger.log('-'*100)
    logger.close()
    return logger.log_dir, nas_bench.query_index_by_arch( best_arch )
if __name__ == '__main__':
    parser = argparse.ArgumentParser("Regularized Evolution Algorithm")
    parser.add_argument('--data_path', type=str, help='Path to dataset')
    parser.add_argument('--dataset', type=str, choices=['cifar10', 'cifar100', 'ImageNet16-120'], help='Choose between Cifar10/100 and ImageNet-16.')
    # channels and number-of-cells
    parser.add_argument('--search_space_name', type=str, help='The search space name.')
    parser.add_argument('--max_nodes', type=int, help='The maximum number of nodes.')
    parser.add_argument('--channel', type=int, help='The number of channels.')
    parser.add_argument('--num_cells', type=int, help='The number of cells in one stage.')
    parser.add_argument('--ea_cycles', type=int, help='The number of cycles in EA.')
    parser.add_argument('--ea_population', type=int, help='The population size in EA.')
    parser.add_argument('--ea_sample_size', type=int, help='The sample size in EA.')
    parser.add_argument('--ea_fast_by_api', type=int, help='Use our API to speed up the experiments or not.')
    parser.add_argument('--time_budget', type=int, help='The total time cost budge for searching (in seconds).')
    # log
    parser.add_argument('--workers', type=int, default=2, help='number of data loading workers (default: 2)')
    parser.add_argument('--save_dir', type=str, help='Folder to save checkpoints and log.')
    parser.add_argument('--arch_nas_dataset', type=str, help='The path to load the architecture dataset (tiny-nas-benchmark).')
    parser.add_argument('--print_freq', type=int, help='print frequency (default: 200)')
    parser.add_argument('--rand_seed', type=int, default=-1, help='manual seed')
    args = parser.parse_args()
    #if args.rand_seed is None or args.rand_seed < 0: args.rand_seed = random.randint(1, 100000)
    # The flag arrives as an int on the command line; coerce it to a bool.
    args.ea_fast_by_api = args.ea_fast_by_api > 0
    if args.arch_nas_dataset is None or not os.path.isfile(args.arch_nas_dataset):
        nas_bench = None
    else:
        print ('{:} build NAS-Benchmark-API from {:}'.format(time_string(), args.arch_nas_dataset))
        nas_bench = API(args.arch_nas_dataset)
    if args.rand_seed < 0:
        # No seed given: repeat the whole search 500 times with fresh random
        # seeds and save the list of best-architecture indexes.
        save_dir, all_indexes, num = None, [], 500
        for i in range(num):
            print ('{:} : {:03d}/{:03d}'.format(time_string(), i, num))
            args.rand_seed = random.randint(1, 100000)
            save_dir, index = main(args, nas_bench)
            all_indexes.append( index )
        torch.save(all_indexes, save_dir / 'results.pth')
    else:
        main(args, nas_bench)
"280835372@qq.com"
] | 280835372@qq.com |
8f3b6dd785a104a1985f13ba77bbd4751286ee03 | 7fd8a09fd94d09d568d67afcb4ecf3b60a936fe2 | /Tests/TestEnvironment/test_config.py | ad9fcccfe8d638613e2087450489742dbd85bc2a | [
"MIT"
] | permissive | dev-11/eigen-technical-task | 4c2ac82c02f2cbd6b7020d2cbfc33beca20db37f | c0b041fc2bd27d2706ccdab94f6eb618f17098bd | refs/heads/master | 2021-05-20T22:14:32.015768 | 2021-03-28T12:02:50 | 2021-03-28T12:02:50 | 252,434,894 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128 | py | DIRECTORIES_TO_SCAN = ['test_docs/']
TXT_FILE_EXTENSION = 'txt'        # extension (without the dot) of files treated as plain text
DEFAULT_INTERESTING_WEIGHT = 1    # baseline weight for an interesting word -- presumably; verify against the scorer
INTERESTING_RATING_THRESHOLD = 5  # minimum rating for an item to count as interesting -- TODO confirm semantics
| [
"otto@masterbranch.io"
] | otto@masterbranch.io |
4286d6e8f7466f4a7c7b415049764bd995510e58 | 272cf6bd5f56812e14c2ed0df60d626859ec2c96 | /imdb_scrapy/spiders/script.py | e4449b1818474a1e4a37f9c3fa7e6064e5dd476e | [] | no_license | abhinavjha98/scrapy_simple_hired | a1b5933be5a401585f6cdfef48299b765cf25303 | a0dbf812d1d4a5e16d8bf46633bdc95b747f2fd3 | refs/heads/master | 2023-01-24T05:46:24.639774 | 2020-11-30T17:17:09 | 2020-11-30T17:17:09 | 298,634,627 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,352 | py | # -*- coding: utf-8 -*-
import scrapy
import urllib
import requests
# item class included here
class DmozItem(scrapy.Item):
    # One scraped job posting from simplyhired.com (fields filled in
    # DmozSpider.parse_attr).
    ApplyLink = scrapy.Field()    # final, redirect-resolved application URL
    Title = scrapy.Field()        # job title text
    Company = scrapy.Field()      # employer name
    Location = scrapy.Field()     # job location
    salary = scrapy.Field()       # raw salary label text(s)
    Logo = scrapy.Field()         # absolute company-logo URL, or 'none'
    Description = scrapy.Field()  # concatenated description paragraphs
class DmozSpider(scrapy.Spider):
    """Crawls simplyhired.com java-job search results (Philadelphia, PA)
    and yields one DmozItem per job detail page."""
    name = "dmoz"
    # Class-level page counter shared across callbacks; pagination stops
    # after page 91 (see parse()).
    page_number = 2
    start_urls = [
        'https://www.simplyhired.com/search?q=java&l=Philadelphia%2C+PA&job=fYxbZPaOvxUi_StIPQGdAhmm__9ReBI5jbVy7amchpkhgoG5xdkwUA'
    ]
    BASE_URL = 'https://www.simplyhired.com'

    def parse(self, response):
        """Follow every job card on a results page, then the next page."""
        links = response.css('a.card-link').xpath("@href").extract()
        for link in links:
            absolute_url = self.BASE_URL + link
            yield scrapy.Request(absolute_url, callback=self.parse_attr)
        next_page = "https://www.simplyhired.com/search?q=java&l=Philadelphia%2C+PA&pn="+str(DmozSpider.page_number)+"&job=fYxbZPaOvxUi_StIPQGdAhmm__9ReBI5jbVy7amchpkhgoG5xdkwUA"
        if DmozSpider.page_number<=91:
            DmozSpider.page_number +=1
            yield response.follow(next_page,callback=self.parse)

    def parse_attr(self, response):
        """Extract one DmozItem from a job detail page."""
        item = DmozItem()
        logo = response.css('img.viewjob-company-logoImg').xpath("@src").extract()
        try:
            item["Logo"] = DmozSpider.BASE_URL+""+logo[0]
        except:
            # NOTE(review): bare except (really guards IndexError when no
            # logo is present) -- consider narrowing it.
            item["Logo"] = 'none'
        item["Title"] = response.css("div.viewjob-jobTitle::text").extract()
        # First labelWithIcon element is the company, second the location.
        item["Location"] = response.css("div.viewjob-labelWithIcon::text")[1].extract()
        item["Company"] = response.css("div.viewjob-labelWithIcon::text")[0].extract()
        # Concatenate all description paragraphs into one string.
        aa=response.css("div.p::text").extract()
        text_list=""
        for text in aa:
            text = text.rstrip("\n")
            text_list=text_list+text
        item["Description"] = text_list
        links = response.css('a.btn-apply').xpath("@href").extract()
        # final_url = urllib.request.urlopen("https://www.simplyhired.com"+links[0],None,1).geturl()
        # NOTE(review): this blocking requests.get() (issued only to resolve
        # the redirect target) bypasses Scrapy's scheduler/throttling.
        final_url = requests.get("https://www.simplyhired.com"+links[0])
        item["ApplyLink"] = final_url.url
        item["salary"]=response.css("span.viewjob-labelWithIcon::text").extract()
        return item
"abhinavjha98ald@gmail.com"
] | abhinavjha98ald@gmail.com |
d212b97e65ff4b31400a53849b1752ac83d9b736 | 4932130e91984cedb824609b4d4dd65e1ba6fbf3 | /doc/lsystem_1.py | 54b5d250c2c50af4085e58a3c59384c51d3f7663 | [] | no_license | junqi108/MAppleT | 999e6089c01d9b535727efc9dd3e0ddc9a6b4183 | 090370f08271455f6c1b89592a0b7eb18212a6c9 | refs/heads/master | 2023-06-20T13:24:34.111337 | 2021-07-20T15:10:10 | 2021-07-20T15:10:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,677 | py | from openalea.lpy import *
__revision__ = " $Id: stocatree.lpy 9964 2010-11-23 12:24:26Z cokelaer $ "
# BUG FIX: the optimised module was imported as "openalea.stocatre..."
# (missing the trailing "e" of "stocatree"), so this import could never
# succeed and the pure-Python fallback was always used. The bare except
# is also narrowed to ImportError so that genuine errors raised while
# importing the optimisation module are no longer silently swallowed.
try:
    import openalea.stocatree.optimisation as optimisation
except ImportError:
    import openalea.stocatree.non_optimised as optimisation
import openalea.stocatree.constants as constants
from openalea.stocatree.output import Data
from openalea.stocatree.colors import Colors
from openalea.stocatree.tree import Tree
from openalea.stocatree.leaf import AppleLeaf
from openalea.stocatree.fruit import AppleFruit
from openalea.stocatree.wood import Wood
from openalea.stocatree.internode import Internode
from openalea.stocatree.apex import apex_data
from openalea.plantik.tools.config import ConfigParams
from openalea.stocatree.tools.simulation import SimulationStocatree
from openalea.stocatree.sequences import Markov, generate_sequence, terminal_fate
from openalea.stocatree.metamer import metamer_data
from openalea.stocatree.growth_unit import growth_unit_data
from openalea.sequence_analysis import HiddenSemiMarkov
from vplants.plantgl.all import Vector3, cross, Viewer
from openalea.stocatree.srandom import boolean_event
from openalea.stocatree.physics import rotate_frame_at_branch, rupture
from openalea.stocatree.tools.surface import *
from openalea.stocatree import get_shared_data
import time
import os
import datetime
# ---------------------------------------------------------------------------
# Module-level simulation setup (executed once when the L-system is loaded).
# ---------------------------------------------------------------------------
# Gravitational acceleration vector in m s^-2 (same constant as original MAppleT).
gravity = Vector3(0.0, 0.0, -9.81); #// in m s^-2 original mappleT
# First, read the configuration file.
options = ConfigParams(get_shared_data('stocatree.ini'))
# Then, define a data structure to store outputs such as MTG, counts, sequences and so on.
data = Data(options=options, revision=__revision__)
# Initialise the simulation clock (time step and start/end dates come from the config).
simulation = SimulationStocatree(dt=options.general.time_step,
                                 starting_date=options.general.starting_year,
                                 ending_date=options.general.end_year)
# Read PGL shape surfaces used for rendering leaves, ground and petals.
stride = int(options.stocatree.stride_number)
leaf_surface = leafSurface(stride, stride)
ground_surface = groundSurface(stride, stride)
petal_surface = petalSurface(stride, stride)
# Init the Markov container and load one hidden semi-Markov model per
# year/shoot-length class (medium: 5-15 metamers, long: 16-65 metamers).
markov = Markov(**options.markov.__dict__)
markov.hsm_96_medium = HiddenSemiMarkov(get_shared_data('fmodel_fuji_5_15_y3_96.txt'))
markov.hsm_97_medium = HiddenSemiMarkov(get_shared_data('fmodel_fuji_5_15_y4_97.txt'))
markov.hsm_98_medium = HiddenSemiMarkov(get_shared_data('fmodel_fuji_5_15_y5_98.txt'))
markov.hsm_95_long = HiddenSemiMarkov(get_shared_data('fmodel_fuji_y12.txt'))
markov.hsm_96_long = HiddenSemiMarkov(get_shared_data('fmodel_fuji_16_65_y3_96.txt'))
markov.hsm_97_long = HiddenSemiMarkov(get_shared_data('fmodel_fuji_16_65_y4_97.txt'))
markov.hsm_98_long = HiddenSemiMarkov(get_shared_data('fmodel_fuji_16_65_y5_98.txt'))
# The following objects (tree, wood, internode, apex_parameters, leaf, fruit)
# store the user parameters and are used by the metamer_data class to create
# new metamers.
#
# tree is unique throughout the simulation, so only one instance is used.
tree = Tree(**options.tree.__dict__)
# wood and internode are unique as well since they only contain parameters.
wood = Wood(**options.wood.__dict__)
internode = Internode(**options.internode.__dict__)
# !!! apices and leaves are specific to a metamer; later on a deepcopy is used.
# apex parameters are the union of the [apex] and [markov] config sections.
temp = {}
temp.update(options.apex.__dict__)
temp.update(options.markov.__dict__)
apex_parameters = temp
leaf_parameters = options.leaf.__dict__
fruit_parameters = options.fruit.__dict__
# Setup the colors once for all.
colors = Colors()
# Define the leaf area function once for all.
simulation.func_leaf_area_init(get_shared_data('functions.fset'))
###################################
# DONT CHANGE ANYTHING HERE BELOW #
###################################
# Phase identifiers of the per-step state machine driven by EndEach().
# Note: physics and update_parameters values are inverted on purpose;
# 0 is used for rendering/initialisation.
initialisation = 0
update_parameters = 1
update_structure = 4
statistics = 3
physics = 2
# module apex(apex_data): scale=2
# module branch(): scale=1
# module growth_unit(growth_unit_data): scale=1
# module axiom()
# module metamer(metamer_data): scale=2
# module root(): scale=1
# Counts sub-steps of the numerical (rotation) convergence loop.
numerical_resolution_counter = 0
def Start():
    """L-Py hook run once before the first derivation step.

    Records the wall-clock start time (used by EndEach/End for timing
    reports) and initialises the output data container (sequences,
    lstring, MTG, ...).
    """
    global time1
    time1 = time.time()
    #random.seed(simulation.seed)
    # init the data to store the required outputs (sequence, lstring, mtg, ...)
    data.init()
def StartEach():
    """L-Py hook run before every derivation step.

    Selects the hidden semi-Markov models (medium and long shoot classes)
    that correspond to the current simulation year and publishes them as
    ``markov.hsm_medium`` / ``markov.hsm_long``.
    """
    year = simulation.date.year
    if year in (1994, 1995):
        medium, long_model = markov.hsm_96_medium, markov.hsm_95_long
    elif year == 1996:
        medium, long_model = markov.hsm_96_medium, markov.hsm_96_long
    elif year == 1997:
        medium, long_model = markov.hsm_97_medium, markov.hsm_97_long
    else:
        # 1998 and any later year fall back to the 1998 models.
        medium, long_model = markov.hsm_98_medium, markov.hsm_98_long
    markov.hsm_medium = medium
    markov.hsm_long = long_model
def EndEach(lstring):
global time1
if (simulation.date.month==1 and simulation.date.day == 1) or (simulation.date.month==6 and simulation.date.day==30) and simulation.phase == physics:
print simulation.date, time.time()-time1, len(lstring)
if simulation.date > simulation.ending_date:
print 'The simulation has ended %s %s\n' % (options.general.end_year, simulation.date)
Stop()
End(lstring)
# This switch controls the selection of which group of
# productions to apply. The general sequence is:
# initialisation --> update parameters --> output (L-string or MTG) --> physics --> statistics --> update structure --> update parameters
# Rendering happens after 'update parameters'. 'output' is only
# called conditionally; mostly, the simulation goes straight from
# 'output parameters' to 'physics'
if simulation.phase == initialisation:
useGroup(update_parameters)
simulation.phase = update_parameters
frameDisplay(False)
elif simulation.phase == update_parameters:
global numerical_resolution_counter
numerical_resolution_counter += 1
if numerical_resolution_counter < simulation.rotation_convergence.steps:
simulation.dt = 0.0 # days
frameDisplay(False)
#jump to the physics phase
else:
if options.general.verbose is True:
print '%s (n elts=%s, it=%s)' % (simulation.date, len(lstring), getIterationNb())
if options.stocatree.saveimage is True:
print 'saving stocatree_output%05d.png' % getIterationNb(), 'png'
Viewer.frameGL.saveImage('stocatree_output%05d.png' % getIterationNb(), 'png')
simulation.dt = simulation.base_dt
numerical_resolution_counter = 0
newyear = simulation.advance()
#TODO#
if simulation.events.harvest.active:
tree.fruits_harvested = tree.fruits
tree.fruits = 0
simulation.harvested = True
#outputs
if options.output.mtg:
save = data.mtg.advance(simulation.dt)
if save:
data.mtg.build_filename("%4.0f_%02d_%02d" % (simulation.date.year, simulation.date.month, simulation.date.day))
data.mtg.init()
data.mtg.save(lstring, simulation.date, tree.trunk_radius)
# save trunk data
if options.output.trunk:
data.trunk.save(simulation.date, tree.trunk_radius,tree.trunk_cross_sectional_area)
# save the lstring
if options.output.l_string:
save = data.l_string.advance(simulation.dt)
if save:
data.l_string.save(lstring, simulation.date)
# save the shoot counts
if options.output.counts:
save = data.counts.advance(simulation.dt)
if save:
data.counts.save(simulation.date)
if simulation.date.day == 0:
data.counts.reset()
useGroup(physics)
simulation.phase = physics
backward()
elif simulation.phase == statistics:
useGroup(update_structure)
simulation.phase = update_structure
frameDisplay(False)
elif simulation.phase == physics:
useGroup(statistics)
simulation.phase = statistics
forward()
frameDisplay(False)
elif simulation.phase == update_structure:
useGroup(update_parameters)
simulation.phase = update_parameters
frameDisplay(False)
else:
ValueError('must not enter here')
def End(lstring):
    """L-Py hook run once after the last derivation step.

    Closes and saves all output files, optionally saves the final scene
    (bgeom/povray) and assembles the PNG frames into a movie, and prints
    timing/size statistics.
    """
    global data
    data.close_all()
    data.save()
    global time1
    if options.stocatree.savescene is True:
        s = Viewer.getCurrentScene()
        s.save('stocatree.bgeom')
        s.save('stocatree.pov')
    time2 = time.time()
    # NOTE: 'Elpsed' typo is in the original output string.
    print 'Elpsed time:',time2-time1
    print 'Final iteration nb',getIterationNb()
    print '%s (n elts=%s, it=%s)' % (simulation.date, len(lstring), getIterationNb())
    if options.stocatree.movie is True:
        from openalea.plantik.tools.movies import create_movie
        create_movie(input_glob='stocatree*png', output_filename='stocatree')
# Total number of derivation steps L-Py will run.
__derivation_length__ = int(options.general.max_iterations)
# Axiom of the L-system in its translator-encoded form; presumably [75] is
# the id of the 'axiom' module produced by the lpy-to-python translation --
# confirm against the original .lpy recipe.
__axiom__ = [75]
# ignore: growth_unit
# production:
# The L-system starts with this group. If there were any errors
# in the initialisations in Start, pruduce an error message;
# otherwise, start the simulation.
# group 0:
def __p_0_0_axiom() :
    # Group 0 production: replace the axiom by a single trunk apex built
    # from the tree's initial HLU reference frame.
    a = apex_data(tree.initial_hlu, 'trunk', **apex_parameters)
    return pproduce(0,a)
# Update the parameters of each metamer (age, reaction wood,
# organs, length, rigidity & shape memory) and perform the
# geometric reconstruction (rotation and placement of each metamer)
# group 1:
def __p_1_0_metamer_ml_branchmetamer_m_(ml,m) :
    # Group 1 (update_parameters): metamer m whose left context is a metamer
    # ml across a branching point. Updates ages/organs and, when mechanics is
    # enabled, re-orients the frame starting from the branching rotation.
    m.update_metamer_parameters(simulation)
    m.organ_activity(simulation)
    if options.stocatree.mechanics:
        m.hlu = rotate_frame_at_branch(ml.hlu, ml.branching_angle, ml.phyllotactic_angle);
        m.hlu = optimisation.reorient_frame(m.hlu, m.rotation_velocity, m.length)
    m.update_position(ml.position)
    return pproduce(1,m)
def __p_1_1_metamer_ml_metamer_m_(ml,m) :
    # Group 1 (update_parameters): metamer m directly following metamer ml on
    # the same axis; propagates the parent frame and position.
    m.update_metamer_parameters(simulation)
    m.organ_activity(simulation)
    if options.stocatree.mechanics:
        m.hlu = optimisation.reorient_frame(ml.hlu, m.rotation_velocity, m.length)
    m.update_position(ml.position)
    return pproduce(2,m)
def __p_1_2_metamer_m_(m) :
    # Group 1 (update_parameters): first metamer (no metamer context on the
    # left); position is updated from the default origin.
    m.update_metamer_parameters(simulation)
    m.organ_activity(simulation)
    m.update_position()
    return pproduce(3,m)
#X:
# produce Cut()
# Calculate the width (by the pipe model), cumulated mass, cumulated torque and
# rotation velocity of each metamer
#group physics
# group 2:
def __p_2_0_rootmetamer_m_(m) :
    # Group 2 (physics): metamer at the tree root -- publish the trunk radius,
    # cross-sectional area and the resulting fruit load on the tree object.
    tree.trunk_radius = m.radius
    tree.trunk_cross_sectional_area = constants.pi * m.radius * m.radius
    tree.fruit_load = tree.fruits / tree.trunk_cross_sectional_area
def __p_2_1_metamer_m__branchmetamer_mb__metamer_mr_(m,mb,mr) :
    # Group 2 (physics, backward pass): metamer m followed by a lateral branch
    # metamer mb and an axial metamer mr. Applies the pipe model for the
    # radius, cumulates mass and torque from both children.
    radius = optimisation.get_new_radius(mb.radius, mr.radius)
    if m.leaf.state=='growing':
        radius = optimisation.get_new_radius(radius, m.leaf.petiole_radius)
    m.radius = optimisation.max(radius, m.radius);
    # update last layer thickness
    m.layers[-1].thickness = m.radius - m.layers[-1].radius
    m.compute_mass(mr, mb)
    # cumulated torque; cumulated mass must be in kg
    if options.stocatree.mechanics:
        m.cumulated_torque = mb.cumulated_torque + mr.cumulated_torque + \
            cross((mb.hlu.heading * mb.length), (gravity * mb.cumulated_mass)) \
            + cross((mr.hlu.heading * mr.length), (gravity * mr.cumulated_mass))\
            + cross((m.hlu.heading * m.length) , tree.tropism)
        m.calculate_rotation_velocity(simulation, options.stocatree.stake)
    return pproduce(4,m)
def __p_2_2_metamer_m_metamer_mr_(m,mr) :
    # Group 2 (physics, backward pass): metamer m followed by a single axial
    # metamer mr (no lateral branch). Same pipe-model/torque cumulation as
    # the branching case, with one child only.
    radius = mr.radius
    if m.leaf.state == 'growing':
        radius = optimisation.get_new_radius(mr.radius, m.leaf.petiole_radius)
    m.radius = optimisation.max(radius, m.radius)
    m.layers[-1].thickness = m.radius - m.layers[-1].radius
    m.compute_mass(mr)
    if options.stocatree.mechanics:
        m.cumulated_torque \
            = cross((mr.hlu.heading * mr.length), (gravity * mr.cumulated_mass))\
            + mr.cumulated_torque \
            + cross((m.hlu.heading * m.length) , tree.tropism)
        m.calculate_rotation_velocity(simulation, options.stocatree.stake)
    return pproduce(5,m)
def __p_2_3_metamer_m_apex_a_(m,a) :
    # Group 2 (physics, backward pass): terminal metamer m followed by an
    # apex a -- base case of the torque cumulation (tropism term only).
    # wood.density, m.fruit_mass are units objects
    radius = a.radius
    if m.leaf.state=='growing':
        radius = optimisation.get_new_radius(a.radius, m.leaf.petiole_radius)
    m.radius = optimisation.max(radius, m.radius);
    m.layers[-1].thickness = m.radius - m.layers[-1].radius
    m.compute_mass()
    m.cumulated_torque = cross( m.hlu.heading * m.length , tree.tropism)
    if options.stocatree.mechanics:
        m.calculate_rotation_velocity(simulation, options.stocatree.stake)
    return pproduce(6,m)
def __p_2_4_apex_a_(a) :
    # Group 2 (physics): a terminal apex (sequence exhausted) keeps expanding
    # radially until it reaches its target radius.
    if (a.sequence_position == 0 and a.radius < a.target_radius):
        a.terminal_expansion(simulation.dt.days)
    return pproduce(7,a)
#X:
# produce Cut()
#// Generate new sequences
#group statistics
# group 3:
def __p_3_0_apex_a_(a) :
    """Group 3 (statistics): generate a new Markov sequence for an apex.

    When a terminal apex (sequence exhausted) breaks dormancy -- either
    because its parent was floral or at bud-break time -- a new shoot
    sequence is drawn from the hidden semi-Markov models, the terminal
    fate for next season is decided, the per-category shoot counts are
    updated, and a new growth unit is emitted in front of the apex.

    FIX: the growth-unit branch previously called ``pproduce(8, ...)``
    without returning its result, unlike every other production in this
    file, so the produced growth unit was silently discarded; the
    ``return`` has been added.
    """
    if (a.sequence_position == 0 and a.get_observation() != 'dormant' and
            (a.parent_observation == 'floral' or simulation.events.bud_break.active)):
        old_observation = a.get_observation()
        # Draw the new shoot sequence for this apex from the HSM models.
        a.sequence = generate_sequence(a.get_observation(),
                                       markov, simulation.date.year,
                                       options.stocatree.second_year_draws,
                                       select_trunk=[int(options.stocatree.select_trunk)])
        a.sequence_position = len(a.sequence)
        # Decide the fate of the terminal bud for the next season.
        if (a.get_observation() == 'trunk'):
            a.set_observation('large')
        elif (a.get_observation() == 'small' and boolean_event(tree.spur_death_probability)):
            a.set_observation('dormant')
        elif (a.get_observation() == 'floral'):
            a.set_observation('dormant')
        else:
            a.set_observation(terminal_fate(simulation.date.year, a.get_observation()))
        a.parent_observation = old_observation
        a.radius = 0
        a.max_terminal_radius_target()
        # A new growth unit starts here.
        tree.growth_units += 1
        # Update the shoot-category counts for the output statistics.
        if options.output.counts:
            if a.parent_observation == 'floral':
                data.counts.florals += 1
            elif a.parent_observation == 'small':
                data.counts.shorts += 1
            elif a.parent_observation == 'medium':
                data.counts.mediums += 1
            elif a.parent_observation == 'large':
                data.counts.longs += 1
                # Length histogram of the long shoots.
                if (a.sequence_position < 26):
                    data.counts.len_16_to_25 += 1
                elif (a.sequence_position < 41):
                    data.counts.len_26_to_40 += 1
                else:
                    data.counts.len_over_40 += 1
        # Save sequences into output data (observation years only).
        if (options.output.sequences and simulation.date.year < 1999 and
                (a.parent_observation in ['large', 'medium', 'small'])):
            data.sequences.save(a.sequence, a.sequence_position)
        return pproduce(8, growth_unit_data(tree.growth_units, simulation.date.year,
                                            a.parent_observation == 'floral'), a)
    else:
        return pproduce(9, a)
#X:
# produce Cut()
#// Add new apices (terminal and lateral) and metamers
#// to the structure
#group update_structure
# group 4:
def __p_4_0_metamer_m_apex_a_(m,a) :
    # Group 4 (update_structure): when the plastochron of the preceding
    # metamer is reached, consume one state of the apex sequence and insert a
    # new metamer in front of the apex.
    # if plastochron is reached, we produce a new metamer
    if (a.sequence_position > 0 and m.age >= m.internode._plastochron):
        a.sequence_position-=1
        # The last state of a floral parent sequence bears the flower.
        flower = (a.sequence_position == 0 and a.parent_observation=='floral')
        # Metamer rank restarts at 1 each new growth year.
        if m.year == simulation.date.year:
            number = m.number + 1
        else:
            number = 1
        #print fruit_parameters
        mn = metamer_data(floral=flower, number=number, hlu=a.hlu,
            zone=a.sequence[a.sequence_position][0], observation=a.get_observation_from_sequence(),
            p_angle=(m.phyllotactic_angle + tree.phyllotactic_angle),
            b_angle=tree.branching_angle, wood=wood, internode=internode,
            fruit=AppleFruit(**fruit_parameters), leaf=AppleLeaf(**leaf_parameters))
        mn.trunk = a.trunk;
        mn.year = simulation.date.year
        return pproduce(10,mn,a)
    else:
        return pproduce(11,a)
def __p_4_1_apex_a_(a) :
    # Group 4 (update_structure): apex with no preceding metamer in context --
    # produce the first metamer of its sequence.
    #if Debug:print 'APEX seq pos=',a.sequence_position
    if (a.sequence_position > 0):
        a.sequence_position -= 1
        branching_angle = tree.branching_angle;
        flower = (a.sequence_position == 0 and a.parent_observation=='floral')
        #TODO check first and second index of a.sequence versus lsystem.l code
        m = metamer_data(floral=flower, number=1, hlu=a.hlu, zone=a.sequence[a.sequence_position][0], observation=a.get_observation_from_sequence(), p_angle=(tree.phyllotactic_angle), b_angle=branching_angle, wood=wood, internode=internode,
            fruit=AppleFruit(**fruit_parameters), leaf=AppleLeaf(**leaf_parameters))
        m.trunk = a.trunk
        m.year = simulation.date.year
        return pproduce(12,m,a)
    else:
        return pproduce(13,a)
def __p_4_2_metamer_m_metamer___apex_a_(m,a) :
    # Group 4 (update_structure): floral immediate (sylleptic) lateral axis --
    # treated as a lateral rather than a terminal; may die with some
    # probability, otherwise a sylleptic apex is branched off the metamer.
    # case of a floral immediate lateral axis: should be treated as
    # laterals and not as terminals
    if (not m.developped and a.parent_observation == 'floral' and a.sequence_position == 0):
        m.developped = True
        if (boolean_event(tree.inflorescence_death_probability)):
            return pproduce(14,m)
        m.branching_angle = tree.floral_branching_angle
        hlu = rotate_frame_at_branch(m.hlu, m.branching_angle, m.phyllotactic_angle);
        sylleptic_apex = apex_data(hlu, terminal_fate(simulation.date.year, 'floral'), **apex_parameters)
        sylleptic_apex.parent_observation = 'floral'
        return pproduce(15,m,sylleptic_apex)
def __p_4_3_metamer_m_(m) :
    # Group 4 (update_structure): plain metamer -- may rupture under torque
    # (branch is cut) or, at bud break, develop its lateral bud into a new
    # apex.
    if (options.stocatree.ruptures and rupture(m.cumulated_torque, m.radius, wood._modulus_of_rupture)):
        print 'EXTRAORDINARY EVENT: There was a rupture in the system.\n'
        return pproduce(16,'Cut')
    if (m.observation!= 'dormant' and not m.developped and simulation.events.bud_break.active):
        m.developped = True
        hlu = rotate_frame_at_branch(m.hlu, m.branching_angle, m.phyllotactic_angle)
        a = apex_data(hlu, m.observation, **apex_parameters)
        return pproduce(17,m,a)
    else:
        return pproduce(18,m)
#X:
# produce Cut()
# group 0:
#// Graphical rendering of the tree
# interpretation:
def __h_0_0_root() :
    # Interpretation rule for the root module: draw the ground surface.
    return pproduce(19,colors.ground,ground_surface)
def __h_0_1_metamer_m_(m) :
    # Interpretation rule for a metamer: pick the shoot colour from the
    # configured render mode, draw the internode cylinder, then the flower,
    # fruit and leaf organs when present. The numbered pproduce calls are the
    # lpy-translated geometry commands (SetHead, colour, F, PglShape, ...).
    #print 'interpretation called', getIterationNb()
    shoot_colour = colors.error
    if options.stocatree.render_mode == 'bark':
        shoot_colour = colors.bark
    elif options.stocatree.render_mode == 'observations':
        shoot_colour = colors.observation.get_color(m.observation)
    elif options.stocatree.render_mode == 'zones':
        shoot_colour = colors.zone.get_color(m.zone)
    elif options.stocatree.render_mode == 'reaction_wood':
        shoot_colour = colors.reaction_wood.get_color(m.layers[-1].reaction_wood)
    elif options.stocatree.render_mode == 'year':
        shoot_colour = colors.year.get_color(m.year, options.general.starting_year)
    # Orient the turtle frame and draw the internode.
    pproduce(20,m.hlu.heading.x,m.hlu.heading.y,m.hlu.heading.z,m.hlu.up.x,m.hlu.up.y,m.hlu.up.z)
    pproduce(21,m.radius*12.,shoot_colour,m.length*10)
    # degrees per radian
    d2r = 180.0 / constants.pi
    if (m.fruit.state == 'flower'):
        #TODO the five flowers are at the same place !!
        scale = 5.
        pproduce(22,m.phyllotactic_angle * d2r)
        pproduce(23)
        pproduce(24,colors.stamen)
        pproduce(25,0.0025*scale)
        pproduce(26)
        pproduce(27)
        pproduce(28)
        pproduce(29)
        pproduce(30)
        pproduce(31)
        pproduce(32)
        pproduce(33)
        pproduce(34)
        pproduce(35)
        pproduce(36)
        pproduce(37)
        pproduce(38,colors.petal)
        pproduce(39,petal_surface,0.02*scale)
        pproduce(40,petal_surface,0.02*scale)
        pproduce(41,petal_surface,0.02*scale)
        pproduce(42,petal_surface,0.02*scale)
        pproduce(43,petal_surface,0.02*scale)
        pproduce(44)
    #
    elif (m.fruit.state == 'fruit'):
        # Fruit radius scales with its mass.
        r = m.fruit.mass *1.5
        if r != 0:
            pproduce(45,float(m.phyllotactic_angle * d2r))
            pproduce(46,r,colors.fruit,r)
    #f
    if m.leaf.state=='growing':
        r = m.leaf.mass *1000.
        if r==0:
            r=0.1
        # check max total mass should be less than 0.66 grams
        if simulation.events.autumn.active is False:
            pproduce(47,colors.leaf)
        else:
            pproduce(48,colors.autumn_leaf)
        # Don't touch this!!! change the r value only
        # at least don't change the factors between F and PglShape (keep the 0.1 factor).
        #TODO take m radius into account
        #nproduce (RollL(m.phyllotactic_angle * d2r) Up(90.0) SetWidth(0.002) F(r *0.1) RollToVert() PglShape(leaf_surface, r) EB())
        pproduce(49,m.phyllotactic_angle * d2r,r *0.1,leaf_surface,r)
        #nproduce (RollL(m.phyllotactic_angle * d2r) Up(90.0) SetWidth(0.002) F(r *0.1) ~l(r) EB())
    return pproduce(50)
# endgroup
# homomorphism:
# endlsystem
| [
"christophe.pradal@inria.fr"
] | christophe.pradal@inria.fr |
b1163b2984e2eb93acf05d4c2079a8f7368f68bc | a04c52e256b52a69f8786045d7751907b7d652cd | /python/inflowPrepMMC.py | 1d0eb28fe69b6ca4a1f2f956efdcea8abbd65f35 | [] | no_license | mchurchf/ABLTools | 9b77e599759d38888bf7af44295b550ff8bb7396 | 474dc20b98f91dfc2b15853128e3ae62666f29cb | refs/heads/master | 2021-01-20T15:23:51.950863 | 2019-06-19T21:29:04 | 2019-06-19T21:29:04 | 90,765,882 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 16,595 | py | # inflowPrepMMC.py
#
#
# Matt Churchfield
# National Renewable Energy Laboratory
# 25 August 2017
#
# This is a module to deal with preparing inflow for mesoscale-microscale
# coupled cases over complex terrain.
import numpy as np
# Class to deal with the mesoscale-microscale inflow preparation.
class inflowPrepMMC:
    """Prepare mesoscale-microscale-coupled (MMC) inflow data for
    microscale simulations over complex terrain.

    Handles reading/writing OpenFOAM ``boundaryData`` points and field
    files, reading foamToVTK boundary files, simple geometric transforms
    of the point cloud, and latitude/longitude <-> UTM conversion.

    Fixes relative to the original:
      * ``readBoundaryDataField``: removing a leading '(' assigned into a
        string index (``data[0][0] = ...``, a TypeError); now rebinds
        ``data[0]``.
      * ``LatLonToUTM``: fixed the ``UTMZone`` NameError typo, made the
        array branch work for any rank via ``np.ndindex`` (the nested
        j/k loops never ran for 1-D/2-D input), and the method now
        actually returns its results.
      * ``UTMtoLatLon``: the array branch passed ``self`` twice to
        ``UTMtoLatLon_elem`` and had the same loop bug; both fixed.
      * Python-2 ``print`` statements replaced with single-argument
        ``print(...)`` calls, valid in both Python 2 and 3; ``== None``
        replaced with ``is None``.
    """

    def __init__(self):
        """Create an empty container; data is filled by the read* methods."""
        self.name = []      # dataset name (from the VTK header), if any
        self.nPoints = []   # number of boundary points once data is read
        self.xyz = []       # (nPoints, 3) array of point coordinates

    def readBoundaryDataPoints(self, pointsFileName):
        """Read an OpenFOAM boundaryData 'points' file into self.xyz.

        Expected layout: header lines, a point count, an opening '(',
        then one '(x y z)' tuple per line.
        """
        # Figure out the header length by scanning for the opening '('.
        f = open(pointsFileName, 'r')
        lineText = f.readline()
        i = 0
        while (lineText[0] != '('):
            i = i + 1
            lineText = f.readline()
        headerLength = i - 1
        f.close()

        # Re-open and read the data proper.
        f = open(pointsFileName, 'r')
        for i in range(headerLength):
            f.readline()
        self.nPoints = int(f.readline())
        f.readline()
        self.xyz = np.zeros((self.nPoints, 3))
        for i in range(self.nPoints):
            data = f.readline().split()
            # Strip the leading '(' from x and the trailing ')' from z.
            x = float(data[0][1:])
            y = float(data[1])
            z = data[2]
            z = float(data[2][0:len(z) - 1])
            self.xyz[i, 0] = x
            self.xyz[i, 1] = y
            self.xyz[i, 2] = z
        f.close()

    def writeBoundaryDataPoints(self, pointsFileName):
        """Write self.xyz as an OpenFOAM boundaryData 'points' file."""
        f = open(pointsFileName, 'w')
        f.write('\n')
        nPoints = len(self.xyz)
        nDim = 3
        f.write(str(nPoints) + '\n')
        f.write('(\n')
        for i in range(nPoints):
            f.write('(')
            for j in range(nDim):
                if (j < nDim - 1):
                    f.write(str(self.xyz[i, j]) + ' ')
                else:
                    f.write(str(self.xyz[i, j]))
            f.write(')\n')
        f.write(')')
        f.close()

    def writeDataWRFtoFOAM(self, pointsFileName, UTMoffsetX, UTMoffsetY, UTMzone):
        """Write self.xyz as a WRFtoFOAM 'lat lon z' file.

        The x/y coordinates are shifted by the given UTM offsets and
        converted back to latitude/longitude within UTMzone.
        """
        f = open(pointsFileName, 'w')
        nPoints = len(self.xyz)
        for i in range(nPoints):
            [lat, lon] = self.UTMtoLatLon(self.xyz[i, 0] + UTMoffsetX,
                                          self.xyz[i, 1] + UTMoffsetY, UTMzone)
            f.write(str(lat) + ' ' + str(lon) + ' ' + str(self.xyz[i, 2]) + '\n')
        f.close()

    def readBoundaryDataField(self, fieldFileName):
        """Read an OpenFOAM boundaryData field file.

        Returns an (nPoints, nDim) numpy array; nDim is detected from
        the first data line (scalar=1, vector=3, ...). Also sets
        self.nPoints.
        """
        # Figure out the header length: the data starts at the first line
        # that begins with '(' and is not preceded by a '//' comment line.
        f = open(fieldFileName, 'r')
        lineText = f.readline()
        if (len(lineText.strip()) > 0):
            lineText = lineText.strip()
        lineTextPrevious = lineText
        i = 0
        while (((lineText[0] != '(')) or
               ((lineText[0] == '(') and (lineTextPrevious[0] == '/'))):
            i = i + 1
            lineTextPrevious = lineText
            lineText = f.readline()
            if (len(lineText.strip()) > 0):
                lineText = lineText.strip()
        headerLength = i - 1
        f.close()

        # Get data sizes from the first data line.
        f = open(fieldFileName, 'r')
        for i in range(headerLength):
            f.readline()
        self.nPoints = int(f.readline())
        f.readline()
        data = f.readline().split()
        # Remove stand-alone and attached parentheses around the tuple.
        if (data[0] == '('):
            del(data[0])
        if (data[-1] == ')'):
            del(data[-1])
        if (data[0][0] == '('):
            # FIX: was data[0][0] = data[0][1:], which raises TypeError
            # (strings do not support item assignment).
            data[0] = data[0][1:]
        if (data[-1][-1] == ')'):
            data[-1] = data[-1][:-1]
        nDim = len(data)
        field = np.zeros((self.nPoints, nDim))
        f.close()

        # Read through the header again.
        f = open(fieldFileName, 'r')
        for i in range(headerLength + 2):
            f.readline()
        # Read in the data, stripping parentheses and newlines.
        for i in range(self.nPoints):
            data = f.readline().strip().strip('(').strip(')').split()
            for j in range(nDim):
                field[i, j] = float(data[j])
        f.close()
        return field

    def writeBoundaryDataField(self, fieldFileName, field):
        """Write an (nPoints, nDim) field as an OpenFOAM boundaryData file.

        Scalars are written bare, vectors/tensors as '(c0 c1 ...)'.
        """
        f = open(fieldFileName, 'w')
        f.write('\n')
        nPoints = len(field)
        nDim = len(field[0])
        f.write(str(nPoints) + '\n')
        f.write('(\n')
        for i in range(nPoints):
            if (nDim == 1):
                f.write(str(field[i, 0]) + '\n')
            else:
                f.write('(')
                for j in range(nDim):
                    if (j < nDim - 1):
                        f.write(str(field[i, j]) + ' ')
                    else:
                        f.write(str(field[i, j]))
                f.write(')\n')
        f.write(')')
        f.close()

    def foamToVTKtoXYZ(self, VTKfileName):
        """Read the point coordinates of a foamToVTK boundary VTK file
        into self.xyz (and set self.name / self.nPoints)."""
        f = open(VTKfileName, 'r')
        # Read through the 5-line legacy-VTK header; line 5 holds the count.
        f.readline()
        self.name = f.readline()
        f.readline()
        f.readline()
        sizing = f.readline().split()
        self.nPoints = int(sizing[1])
        # Flat array for the x, y, z data.
        data = np.zeros((3 * self.nPoints))
        # Coordinates may span several whitespace-separated lines.
        totalReadLength = 0
        while totalReadLength < 3 * self.nPoints:
            d = f.readline().split()
            nd = len(d)
            for i in range(nd):
                data[totalReadLength + i] = float(d[i])
            totalReadLength = totalReadLength + nd
        f.close()
        # De-interleave into an ordered (nPoints, 3) array.
        self.xyz = np.zeros((self.nPoints, 3))
        for j in range(3):
            self.xyz[:, j] = data[j::3]

    def sortXYZ(self, planeType, sortOrder=None, sortIndex=None):
        """Sort self.xyz in place and return the sort index.

        If sortIndex is given it is applied directly; otherwise the sort
        key order is derived from planeType ('xy', 'xz' or 'yz') to
        follow the OpenFOAM boundary sorting convention, or taken from
        sortOrder when provided.

        Raises ValueError for an unknown planeType (previously printed a
        message and then crashed with a confusing TypeError).
        """
        if sortIndex is None:
            if sortOrder is None:
                if planeType == 'xy':
                    sortOrder = [0, 1]
                elif planeType == 'xz':
                    sortOrder = [0, 2]
                elif planeType == 'yz':
                    sortOrder = [2, 1]
                else:
                    raise ValueError("Invalid specification of planeType %r: "
                                     "need 'xy', 'xz', or 'yz'." % (planeType,))
            # lexsort sorts by the *last* key first (primary key).
            sortInd = np.lexsort((self.xyz[:, sortOrder[0]], self.xyz[:, sortOrder[1]]))
        else:
            sortInd = sortIndex
        self.xyz = self.xyz[sortInd]
        return sortInd

    def translateXYZ(self, T):
        """Translate all points by vector T."""
        self.xyz = self.xyz + T

    def rotateXYZ(self, R):
        """Rotate every point by the 3x3 matrix R (row-vector convention)."""
        for i in range(self.nPoints):
            self.xyz[i] = np.matmul(self.xyz[i], R)

    def findSurface(self, planeType, boundingBox, searchRadius):
        """Return the points of self.xyz inside boundingBox (3x2 array of
        [min, max] per axis).

        NOTE(review): ``searchRadius`` is currently unused, and the
        lower-left search below only covers the 'yz' case and operates on
        self.xyz rather than the bounded subset -- this method looks
        unfinished; confirm intent before relying on it.
        """
        ind = []
        for i in range(self.nPoints):
            x = self.xyz[i, 0]
            y = self.xyz[i, 1]
            z = self.xyz[i, 2]
            if ((x > boundingBox[0, 0]) and (x < boundingBox[0, 1]) and
                (y > boundingBox[1, 0]) and (y < boundingBox[1, 1]) and
                (z > boundingBox[2, 0]) and (z < boundingBox[2, 1])):
                ind.append(i)
        keepInd = np.asarray(ind)
        xyzBound = self.xyz[keepInd]
        # Now find lower left point.
        if (planeType == 'yz'):
            # presumably this should search xyzBound, not self.xyz -- TODO confirm
            ind = self.xyz[:, 1].argmin()
            print(ind)
        return xyzBound

    def LatLonToUTM(self, latitude, longitude, UTMzone=None):
        """Convert latitude/longitude (degrees) to UTM coordinates.

        Accepts scalars or numpy arrays of any rank. Returns
        (UTMx, UTMy, UTMzone); for array input the zone is an
        object-dtype array of 'NN L' strings.

        FIXES: the original referenced the misspelled name ``UTMZone``
        (NameError), populated an unused ``UTMzoneArray``, never
        returned, and its nested j/k loops only iterated for 3-D input.
        """
        if isinstance(latitude, np.ndarray):
            dims = latitude.shape
            UTMx = np.zeros(dims)
            UTMy = np.zeros(dims)
            zone_given = UTMzone is not None
            UTMzoneOut = np.empty(dims, dtype=object)
            # np.ndindex handles any rank (1-D, 2-D, 3-D, ...).
            for idx in np.ndindex(dims):
                if zone_given:
                    UTMx[idx], UTMy[idx], UTMzoneOut[idx] = \
                        self.LatLonToUTM_elem(latitude[idx], longitude[idx], UTMzone[idx])
                else:
                    UTMx[idx], UTMy[idx], UTMzoneOut[idx] = \
                        self.LatLonToUTM_elem(latitude[idx], longitude[idx])
            return (UTMx, UTMy, UTMzoneOut)
        else:
            if UTMzone is None:
                return self.LatLonToUTM_elem(latitude, longitude)
            else:
                return self.LatLonToUTM_elem(latitude, longitude, UTMzone)

    def LatLonToUTM_elem(self, latitude, longitude, UTMzone=None):
        """Convert one latitude/longitude pair (degrees) to UTM.

        Returns (UTMx, UTMy, UTMzone) with UTMzone formatted 'NN L'.
        When UTMzone is not given it is computed from the coordinates.
        """
        # Compute the UTM zone if not given.
        if UTMzone is None:
            z = int((longitude/6.0) + 31.0)
            # Latitude band letter (8-degree bands, C..X skipping I and O).
            if latitude < -72.0:
                l = 'C'
            elif latitude < -64.0:
                l = 'D'
            elif latitude < -56.0:
                l = 'E'
            elif latitude < -48.0:
                l = 'F'
            elif latitude < -40.0:
                l = 'G'
            elif latitude < -32.0:
                l = 'H'
            elif latitude < -24.0:
                l = 'J'
            elif latitude < -16.0:
                l = 'K'
            elif latitude < -8.0:
                l = 'L'
            elif latitude < 0.0:
                l = 'M'
            elif latitude < 8.0:
                l = 'N'
            elif latitude < 16.0:
                l = 'P'
            elif latitude < 24.0:
                l = 'Q'
            elif latitude < 32.0:
                l = 'R'
            elif latitude < 40.0:
                l = 'S'
            elif latitude < 48.0:
                l = 'T'
            elif latitude < 56.0:
                l = 'U'
            elif latitude < 64.0:
                l = 'V'
            elif latitude < 72.0:
                l = 'W'
            else:
                l = 'X'
            if z < 10:
                zStr = '0' + str(z)
            else:
                zStr = str(z)
            UTMzone = zStr + ' ' + l
        # Compute the UTM coordinates given the zone (WGS84 ellipsoid,
        # transverse Mercator series expansion, k0 = 0.9996).
        sa = 6378137.0
        sb = 6356752.314245
        e2 = (((sa**2) - (sb**2))**0.5) / sb
        e2sqr = e2**2
        c = (sa**2) / sb
        lat = latitude * (np.pi/180.0)
        lon = longitude * (np.pi/180.0)
        z = float(UTMzone[0:2])
        # Central meridian of the zone, in degrees.
        S = ((6.0*z) - 183.0)
        deltaS = lon - (S * (np.pi/180.0))
        a = np.cos(lat) * np.sin(deltaS)
        epsilon = 0.5 * np.log((1.0 + a)/(1.0 - a))
        nu = np.arctan(np.tan(lat)/np.cos(deltaS)) - lat
        v = (c / ((1.0 + (e2sqr * (np.cos(lat))**2)))**0.5) * 0.9996
        ta = (0.5*e2sqr) * epsilon**2 * (np.cos(lat))**2
        a1 = np.sin(2.0*lat)
        a2 = a1 * (np.cos(lat))**2
        j2 = lat + (0.5*a1)
        j4 = ((3.0*j2) + a2) / 4.0
        j6 = ((5.0 * j4) + (a2 * (np.cos(lat))**2))/3.0
        alpha = (3.0/4.0) * e2sqr
        beta = (5.0/3.0) * alpha**2
        gamma = (35.0/27.0) * alpha**3
        # Meridional arc length from the equator.
        Bm = 0.9996 * c * (lat - alpha * j2 + beta * j4 - gamma * j6)
        # 500 km false easting.
        UTMx = epsilon * v * (1.0 + (ta/3.0)) + 500000.0
        UTMy = nu * v * (1.0 + ta) + Bm
        # 10,000 km false northing for the southern hemisphere.
        if (UTMy < 0.0):
            UTMy = UTMy + 9999999.0
        return (UTMx, UTMy, UTMzone)

    def UTMtoLatLon(self, UTMx, UTMy, UTMzone):
        """Convert UTM coordinates to latitude/longitude (degrees).

        Accepts scalars or numpy arrays of any rank (UTMzone must then be
        an array of 'NN L' strings of the same shape).

        FIXES: the original array branch passed ``self`` twice to
        ``UTMtoLatLon_elem`` and its nested j/k loops only iterated for
        3-D input.
        """
        if isinstance(UTMx, np.ndarray):
            dims = UTMx.shape
            latitude = np.zeros(dims)
            longitude = np.zeros(dims)
            for idx in np.ndindex(dims):
                latitude[idx], longitude[idx] = \
                    self.UTMtoLatLon_elem(UTMx[idx], UTMy[idx], UTMzone[idx])
        else:
            [latitude, longitude] = self.UTMtoLatLon_elem(UTMx, UTMy, UTMzone)
        return (latitude, longitude)

    def UTMtoLatLon_elem(self, UTMx, UTMy, UTMzone):
        """Convert one UTM coordinate (easting, northing, 'NN L' zone) to
        latitude/longitude in degrees (WGS84, k0 = 0.9996)."""
        sa = 6378137.0
        sb = 6356752.314245
        e2 = (((sa**2) - (sb**2))**0.5) / sb
        e2sqr = e2**2
        c = (sa**2) / sb
        # Deal with the UTM zone: band letters N..X are the northern
        # hemisphere; southern eastings carry a 10,000 km false northing.
        zoneNumber = float(UTMzone[0:2])
        if UTMzone[3] > 'M':
            y = UTMy
        else:
            y = UTMy - 10000000.0
        x = UTMx - 500000.0
        # Central meridian of the zone, in degrees.
        s = ((6.0*zoneNumber) - 183.0)
        lat = y / (6366197.724*0.9996)
        v = (c / ((1.0 + (e2sqr*(np.cos(lat))**2)))**0.5) * 0.9996
        a = x / v
        a1 = np.sin(2.0*lat)
        a2 = a1 * (np.cos(lat))**2
        j2 = lat + (0.5*a1)
        j4 = ((3.0*j2) + a2)/4.0
        j6 = ((5.0*j4) + (a2*(np.cos(lat))**2))/3.0
        alpha = (3.0/4.0)*e2sqr
        beta = (5.0/3.0)*(alpha**2)
        gamma = (35.0/27.0)*alpha**3
        Bm = 0.9996*c*(lat - alpha*j2 + beta*j4 - gamma*j6)
        b = (y - Bm) / v
        Epsi = ((e2sqr * a**2) / 2.0) * (np.cos(lat))**2
        Eps = a * (1.0 - (Epsi/3.0))
        nab = (b * (1.0 - Epsi)) + lat
        senoheps = (np.exp(Eps) - np.exp(-Eps)) / 2.0
        Delt = np.arctan(senoheps / (np.cos(nab)))
        TaO = np.arctan(np.cos(Delt) * np.tan(nab))
        longitude = (Delt *(180.0/np.pi)) + s
        latitude = (lat + (1.0 + e2sqr*(np.cos(lat)**2) - (3.0/2.0) * e2sqr * np.sin(lat) * np.cos(lat) * (TaO - lat)) * (TaO - lat)) * (180.0/np.pi)
        return (latitude, longitude)
| [
"matt.churchfield@nrel.gov"
] | matt.churchfield@nrel.gov |
49e3e2dc21de028828c27795a9fc50162734f705 | bb453b5c265ea1567de6750c910a35c80ee984d5 | /OSG/Gravel/SLiM_gravel_osg.py | f50c4d9c7372d20ff9a6973bfc5cc9cdd759d4d4 | [] | no_license | SantosJGND/Models | 8134142970a15ecf59c613947c681a434da7de91 | b7b6740071a4caefe0addac7d883eb1bb18fb8d4 | refs/heads/master | 2021-05-18T21:13:05.754098 | 2020-11-21T20:52:26 | 2020-11-21T20:52:26 | 251,423,689 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,737 | py |
from tools.SLiM_pipe_tools import (
read_chrom_sizes, region_samplev2,
fasta_RextractUnif, write_fastaEx, process_recipe,
SLiM_osg_dispenser,
)
from tools.ABC_utilities import demo_to_recipe
###########
if __name__ == '__main__':
    # Driver script: sample N fasta segments of length L from a reference
    # genome, build a SLiM simulation "cookbook" for the Gravel demographic
    # model, and dispatch the SLiM jobs to OSG (Open Science Grid).
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('-R', '--recipe', type=str,
                        default='Human_sims/Gravel_2011_frame_sample.slim')
    parser.add_argument('-d', '--demo', type=str,
                        default='demos/P&M2013_M3.txt')
    parser.add_argument('-c', '--cookbook', type=str,
                        default='SizeChange')
    parser.add_argument('-s', '--short', type=str,
                        default='Gravel')
    parser.add_argument('-a', '--assembly', type=str,
                        default='hg38')
    parser.add_argument('-r','--rescale', type=float,
                        default= 1)
    parser.add_argument('-N', type=int,
                        default= 40)
    parser.add_argument('-L', type=int,
                        default= 1000000)
    parser.add_argument('--NeC', type=int,
                        default= 20000)
    parser.add_argument('--Nef', type=int,
                        default= 400000)
    parser.add_argument('--rate', type=float,
                        default= 1.03)
    parser.add_argument('--mu', type=float,
                        default= 1e-8)
    parser.add_argument('--rec', type=float,
                        default= 1e-8)
    parser.add_argument('--mfile', type=str,
                        default= 'mut_matrix_v0.txt')
    #parser.add_argument('-a', '--annotations', type=str, nargs='+', default= [])
    args = parser.parse_args()
    ## directories (NOTE: several paths are hard-coded for the OSG account).
    import os
    main_dir= os.getcwd() + '/'
    slim_dir= '/home/douradojns/SLiM/'
    fastas_dir= '/home/douradojns/SLiM/Fastas/'
    chrom_sizes_dir= '/home/douradojns/SLiM/chrom_sizes/'
    ##
    ## sub-directories.
    dir_data= main_dir + 'mutation_counter/data/sims/'
    count_dir= main_dir + 'mutation_counter/count/'
    dir_launch= main_dir + 'mutation_counter'
    slim_soft= slim_dir + 'sim*'
    matrix_dir= '' #'/' + main_dir + 'mut_matices/'
    log_dir= 'log'
    summary_file= 'sims.log'
    mutlog= 'toMut.log'
    os.makedirs(log_dir, exist_ok=True)
    #
    ##
    ## SLiM recipe.
    #sim_dir= main_dir + 'Recipes/Human_sims/'
    sim_template= main_dir + 'Recipes/' + args.recipe
    ##
    ##
    #
    ########### ############################## #############
    ############################################################
    ## Simulation tag names, assembly to select from.
    batch_name= args.short
    assembly= args.assembly
    ## files & variables
    ## fasta segment lengths; number of segments / sims.
    L= args.L
    N= args.N
    ############################################################
    ######################## ##############################
    ## Read chrom_sizes file to decide where to sample files from.
    chrom_sizes= read_chrom_sizes(assembly, size_dir= chrom_sizes_dir)
    ## Sample N fasta regions of length L from the reference assembly.
    ##
    fasta= fastas_dir + assembly + '.fa.gz'
    rseqs= region_samplev2(L, chrom_sizes,N, fasta)
    # NOTE(review): SLiM_dispenserv2 is imported here but never used; the
    # dispatch below goes through SLiM_osg_dispenser instead -- confirm.
    from tools.SLiM_pipe_tools import SLiM_dispenserv2
    ## Perform Simulations
    ## I. get cook function and args from tools.cookbook by name:
    selected_book= 'cook_constants_' + args.cookbook
    import tools.cookbook
    book= getattr(tools.cookbook, selected_book)
    # Per-population sample sizes and per-site rates passed to the cookbook.
    cookargs= {
        "s1": 500,
        "s2": 500,
        "s3": 500,
        "mu": args.mu,
        "rec": args.rec
    }
    sim_store, cookID= book(rseqs,dir_data= dir_data,
                            slim_dir= slim_dir, batch_name= batch_name,**cookargs)
    print('launch SLiM jobs.')
    # Dispatch the stored simulations as OSG jobs (1 CPU, 1 GB mem/disk each).
    SLiM_osg_dispenser(sim_store, sim_recipe= sim_template,cookID= cookID, slim_dir= slim_dir, batch_name= batch_name,
                       ID= cookID, L= L, logSims= summary_file, mutlog= mutlog,dir_data= dir_data,
                       cpus= 1,Nmem= 1,mem= 'GB',diskN= 1,diskS= 'GB',log_dir= log_dir)
| [
"dourado.jns@gmail.com"
] | dourado.jns@gmail.com |
45d9db07a10f58220ed192774bd5337b434140b1 | 005ae4a30a0a322481489e2e4eacc0ac1a5bf36c | /amaascore/assets/derivative.py | f05a3989d7fdd6ea5666f340a3d313ee827966d8 | [
"Apache-2.0"
] | permissive | amaas-fintech/amaas-core-sdk-python | d8b98dcc50c5536250803972b8d39b38e382825a | bd77884de6e5ab05d864638addeb4bb338a51183 | refs/heads/master | 2021-01-11T19:01:50.391765 | 2020-10-27T02:13:28 | 2020-10-27T02:13:28 | 79,296,376 | 0 | 8 | Apache-2.0 | 2018-12-11T08:11:52 | 2017-01-18T02:46:34 | Python | UTF-8 | Python | false | false | 1,358 | py | from __future__ import absolute_import, division, print_function, unicode_literals
from datetime import date, datetime
from dateutil import parser
from decimal import Decimal
from amaascore.assets.asset import Asset
class Derivative(Asset):
    """A non-fungible derivative asset.

    Thin specialization of ``Asset``: it forces ``fungible=False`` and
    ``roll_price=False`` and tags instances with ``asset_class='Derivative'``
    unless a more specific subclass already set one.
    """

    def __init__(self, asset_manager_id, asset_id, asset_issuer_id=None,
                 asset_status='Active', display_name='', description='', country_id=None, venue_id=None,
                 issue_date=None, currency=None,
                 comments=None, links=None, references=None,
                 *args, **kwargs):
        if not hasattr(self, 'asset_class'):  # A more specific child class may have already set this
            self.asset_class = 'Derivative'
        # Derivatives are always non-fungible and never price-rolled.
        super(Derivative, self).__init__(asset_manager_id=asset_manager_id, asset_id=asset_id, fungible=False,
                                         asset_issuer_id=asset_issuer_id, asset_status=asset_status,
                                         display_name=display_name, roll_price=False,
                                         description=description, country_id=country_id, venue_id=venue_id,
                                         issue_date=issue_date, currency=currency,
                                         comments=comments, links=links, references=references,
                                         *args, **kwargs)
| [
"ned@amaas.com"
] | ned@amaas.com |
1c78faf04ba1a79eed5b1f09d8a19e8c1175fe06 | 5eb003a2c4dc978a00de9fd9fac10ffc41c94297 | /quadratic/views.py | fb7036c002d1b97869bef2bc99f0bc83b39e3437 | [] | no_license | fedorchenko-a/ooniversity_django_courses_06 | c1344d3b97520477e645f51187a74a553c1a8258 | 7070f49b000b402e0c2c04deb0c319115bfebf65 | refs/heads/master | 2020-05-21T02:23:36.296395 | 2017-04-08T18:35:00 | 2017-04-08T18:35:00 | 84,559,180 | 0 | 0 | null | 2017-03-10T12:45:57 | 2017-03-10T12:45:57 | null | UTF-8 | Python | false | false | 1,970 | py | from django.shortcuts import render
from django.http import HttpResponse, HttpResponseNotFound
import math
from django.contrib import messages
from . forms import QuadraticForm
# Create your views here.
def quadratic_results(request):
    """Solve a*x^2 + b*x + c = 0 from GET parameters and render the result.

    Reads coefficients a, b, c from a validated ``QuadraticForm`` bound to
    ``request.GET``, computes the discriminant and the real roots (if any),
    and renders ``quadratic/results.html`` with the form, a human-readable
    result ``message`` and the discriminant line ``diskr``.
    """
    message = {}
    diskr = ''
    if request.method == "GET":
        form = QuadraticForm(request.GET)
        if form.is_valid():
            data = form.cleaned_data
            a = data['a']
            b = data['b']
            c = data['c']
            discriminant = b ** 2 - 4 * a * c
            diskr = "Дискриминант: {}".format(discriminant)
            if discriminant < 0:
                message = 'Дискриминант меньше нуля, квадратное уравнение не имеет действительных решений.'
            elif discriminant == 0:
                x = (-b + math.sqrt(discriminant)) / (2 * a)
                message = 'Дискриминант равен нулю, квадратное уравнение имеет один действительный корень: x1 = x2 = {}'.format(round(x))
            else:
                # BUG FIX: the original computed ``-b + math.sqrt(D) / (2 * a)``,
                # i.e. only the square root was divided by 2a. Parenthesize the
                # numerator so the whole quadratic formula is applied.
                x1 = (-b + math.sqrt(discriminant)) / (2 * a)
                x2 = (-b - math.sqrt(discriminant)) / (2 * a)
                message = 'Квадратное уравнение имеет два действительных корня: x1 = {}, x2 = {}'.format(round(x1), round(x2))
    else:
        form = QuadraticForm()
    return render(request, 'quadratic/results.html', {'form': form, 'message': message, 'diskr': diskr})
'''def index(request):
return render(request, "index.html")
def contact(request):
return render(request, "contact.html")
def student_list(request):
return render(request, "student_list.html")
def student_detail(request):
return render(request, "student_detail.html")'''
| [
"smolnax@gmail.com"
] | smolnax@gmail.com |
1694e505bb5a7e90a0692bc69b4fbe548b49cf2b | 012d191a0869869724ad2169289ddef2c74e6a9e | /Sort_Search/Searching/fibo3.py | 2104ca91502ee5a507b52cac13638fd20cec7017 | [] | no_license | seoninja13/Algo_DataStructures | 7ceda677fe330955f0f01f034b3675e409516919 | 0476cf45f30f1c635340f969bf85877ee3943efc | refs/heads/master | 2021-01-23T01:17:18.462298 | 2017-05-31T18:14:28 | 2017-05-31T18:14:28 | 92,860,589 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 530 | py | # Example 1: find the fibonacy nth number
# def findFib(n):
# a,b=0,1
# if n<=1:
# return 0
# else:
# for i in range(n):
# a,b= b,a+b
# return a
# print(findFib(6))
# 0, 1, 1, 2, 3, 5, 8, 13, 21 // findFib(n)
# 0 1 2 3 4 5 6 7 8 // n
# a b
# a b
# aib b
# a b
# Example 2: Using recursive function
def fib(n):
    """Return the n-th Fibonacci number, 1-indexed (fib(1) == fib(2) == 1).

    The original implementation was the naive double recursion, which
    recomputes subproblems and runs in exponential time (and hit the
    recursion limit for modest n). This iterative version runs in O(n)
    and returns the same values for every n >= 1.
    """
    a, b = 1, 1
    # After k iterations, b holds fib(k + 2); run n - 2 steps to reach fib(n).
    for _ in range(n - 2):
        a, b = b, a + b
    return b
| [
"dacheviov@gmail.com"
] | dacheviov@gmail.com |
f35f9453708d4f7187b92e2ba3afe2280bf7d580 | 373daa3912c2f1f7c144a20593abac886729fca2 | /products/serializers.py | 1075d01379f7dd9f26ea0e0a9e97dea005c06e8e | [] | no_license | yash-bhat/Gallery | 0cfccb6b2917c403adf5d79e40df51d5377bcac8 | 6e9f7114786eb9adbeb045d3d555626c4cf8ff87 | refs/heads/main | 2023-02-27T22:48:47.142656 | 2021-02-03T19:34:51 | 2021-02-03T19:34:51 | 335,344,231 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 306 | py | from rest_framework import serializers
from .models import Product
#to return objects in the API for products
class ProductSerializer(serializers.ModelSerializer):
    """Serializes Product instances for the products API, exposing all model fields."""
    class Meta:
        model = Product
        # '__all__' mirrors every field declared on the Product model.
        fields = '__all__'
# no serializer for 'User' model as we will just return random IDs there | [
"hvyashbhat@gmail.com"
] | hvyashbhat@gmail.com |
ddf61c3298a3b789b970f9e7a487613619465098 | 50d35003334d3947ac71717f670647cb7b954427 | /data/ai.py | 33d2484289294860a0951be232b5b6d7d3a98abf | [] | no_license | NEVTIK-org/Simple-Virtual-Assistant | c40aec818972c001a61b6fe900a2ad91c3a3804d | a5731ec332a0ddd4fd7d6ad1c48c8620d862a6c4 | refs/heads/master | 2022-04-22T01:17:07.561823 | 2020-04-13T12:53:32 | 2020-04-13T12:53:32 | 255,242,805 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,563 | py | #Created by Hermawan S.S.
#hermawansent@gmail.com
import os
import brain
import re
from playsound import playsound
import speech_recognition as sr
def speech_input():
    """Prompt on stdout, capture one utterance via brain.spoke(), and return it lower-cased."""
    print("\nMe\t\t: ", end="")
    heard = str(brain.spoke()).lower()
    # Echo the transcription unless recognition produced the literal "none".
    if heard != "none":
        print(heard)
    return heard
def learn_english(args_input=""):
if args_input=="":
args_input=input("New Argument\t: ")
brain.add_args("{}\n".format(args_input.lower()))
ans_input = speech_input()
#ans_input = input("New Answer\t: ")
brain.add_ans_en("{}\n".format(ans_input),str(ans_input))
#Sometime the program can't run effectively more than 6 periods
for _ in range(6):
r = sr.Recognizer()
with sr.Microphone() as source:
audio = r.listen(source,phrase_time_limit=1.25)
try:
r.adjust_for_ambient_noise(source,duration=0.5)
srtext = r.recognize_sphinx(audio)
except:
srtext = ""
def recon(srtext):
for i in srtext.split():
if i in ["hi","hai","hello","o. k.","okay","ok","hey"]:
return i
elif i == "o.":
i+=" k."
return i
if recon(srtext)in ["hi","hai","hello","o. k.","okay","ok","hey"]:
playsound("ai_ready.mp3")
user_input = str(speech_input()).lower() # Speech Mode
user_input_with_line = user_input+"\n"
# user_input = input("Me\t\t: ") #Text Mode
# user_input = user_input.lower()
# user_input_with_line = user_input + "\n"
if re.search("learn english|learn",user_input):
playsound("teach.mp3")
learn_english()
os.system("python ai.py")
elif re.search("buka|open",user_input):
playsound("opening.mp3")
os.system(user_input[5:])
elif re.search("exit|quit|stop",user_input):
playsound("goodbye.mp3")
quit()
elif user_input_with_line in brain.args():
for user_in in brain.args():
if user_input_with_line==user_in:
q_index = brain.args().index(user_in)
print("JANE\t\t: "+brain.ans()[q_index])
s = [char for char in brain.ans()[q_index]]
del s[-1]
for_sound = ''.join(s)
playsound(for_sound+".mp3")
elif user_input_with_line not in brain.args() and user_input!="\n" and user_input!="" and user_input!=None:
print("JANE\t\t: I don't understand, do you like to teach me?")
playsound("teach2.mp3")
demand = str(speech_input())
rep = [i for i in demand]
rep = ''.join(rep)
if re.match("yes|yeah",rep):
print("JANE\t\t: What should I reply that?")
playsound("asking.mp3")
learn_english(user_input)
os.system("python ai.py")
else:
print("JANE\t\t: OK")
playsound("OK.mp3")
os.system("python ai.py")
| [
"farishelmi@nevtik.org"
] | farishelmi@nevtik.org |
d6652ee362ae8f58bcb2d3e494a5e011f4bcedeb | 46ec244f2b4b28645a7c8a1bfe666fc7c69ec76d | /booking/migrations/0022_auto_20181103_1536.py | fd6a9a66c70fa150f65469b9484dad5ef4e7e4c8 | [] | no_license | bisalgt/hotel-jumera-app | d83c3b97f81f649333f26c9e3c1b2545cf516f40 | b9d03c3bb321206a39a1f134c7c13fdbddd63337 | refs/heads/master | 2022-12-14T19:45:23.238189 | 2018-11-09T09:48:49 | 2018-11-09T09:48:49 | 203,003,707 | 0 | 0 | null | 2022-12-08T01:23:07 | 2019-08-18T13:01:29 | Python | UTF-8 | Python | false | false | 710 | py | # Generated by Django 2.1 on 2018-11-03 09:51
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('booking', '0021_auto_20181103_1421'),
]
operations = [
migrations.AlterField(
model_name='booking',
name='check_in',
field=models.DateField(default=datetime.datetime(2018, 11, 3, 9, 51, 35, 959809, tzinfo=utc)),
),
migrations.AlterField(
model_name='booking',
name='check_out',
field=models.DateField(default=datetime.datetime(2018, 11, 4, 9, 51, 35, 959809, tzinfo=utc)),
),
]
| [
"bisalgt@email.com"
] | bisalgt@email.com |
06a75d32395897be1704baf76488c145cb2b23b8 | 779ae09963d3ee259ab8b9dc6f4d035dd551668c | /venv/bin/pyrsa-priv2pub | 1b6424b1514d7ccc442c721d100d6e54a4ab307c | [] | no_license | manav2401/Apna-Hack-Ayega_hackout | 5fa6473b96f3e02810c452de9b18e9b9295ab015 | 734240b844e20db845b0e03273dbb09f48686e6d | refs/heads/master | 2020-08-06T19:17:34.357676 | 2019-10-13T16:01:45 | 2019-10-13T16:01:45 | 213,121,054 | 1 | 0 | null | 2019-10-13T15:04:35 | 2019-10-06T06:44:32 | Python | UTF-8 | Python | false | false | 255 | #!/home/nisarg/flask/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from rsa.util import private_to_public
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(private_to_public())
| [
"nisshah1499@gmail.com"
] | nisshah1499@gmail.com | |
823daf20cb6341e16101ac5a8c0abc46ed7afb5f | 92291e79a95c3c0876c7d5498c53b84e45e703de | /commands/abstract.py | 85c036107c453a68bae11cdcd94e2b64157b51aa | [] | no_license | RafRaf/boobogram-bot | 52932dfde614677bdd1c1c49a2b9de7fa74f454c | 9ae3bc904cb7d333e452f5312e67002bae50ef80 | refs/heads/master | 2022-05-01T23:12:44.096690 | 2022-04-28T06:52:01 | 2022-04-28T06:52:01 | 51,752,737 | 5 | 0 | null | 2018-02-03T09:13:35 | 2016-02-15T12:03:27 | Python | UTF-8 | Python | false | false | 160 | py | class AbstractCommand:
COMMAND = '*TYPE COMMAND HERE*'
def handler(self, bot, update):
raise NotImplementedError('Handler is not implemented')
| [
"smartrafraf@gmail.com"
] | smartrafraf@gmail.com |
8c9849703cc07eca40e79e9815e37eca6f5ad5a7 | 77e2e024741473aa4bfaa91332ed9187130677b5 | /names/my_subprocess.py | 32fc1f9c5438500841b7e4a57c8a5fa499849222 | [] | no_license | ld86/shad-course-work | 444accbd802876d988b7a08c24a51b68b5bfbb87 | 0510e165378ec686455e8937ec146ed12a48063c | refs/heads/master | 2021-01-25T03:54:43.656012 | 2013-08-23T07:48:56 | 2013-08-23T07:48:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,185 | py | #!/usr/bin/python3.2
import os
import time
import fcntl
import subprocess
class IPopen(subprocess.Popen):
    """A subprocess.Popen variant for interactive request/response exchanges.

    All three standard streams default to pipes, and stdout/stderr are put
    into non-blocking mode so reads return immediately instead of waiting
    for the child to close its end.
    """

    def __init__(self, *args, **kwargs):
        """Construct interactive Popen."""
        # Pipe all std streams by default; caller-supplied kwargs win.
        keyword_args = {
            'stdin': subprocess.PIPE,
            'stdout': subprocess.PIPE,
            'stderr': subprocess.PIPE}
        keyword_args.update(kwargs)
        subprocess.Popen.__init__(self, *args, **keyword_args)
        # Make stderr and stdout non-blocking.
        for outfile in (self.stdout, self.stderr):
            if outfile is not None:
                fd = outfile.fileno()
                fl = fcntl.fcntl(fd, fcntl.F_GETFL)
                fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)

    def correspond(self, text, sleep=0.0001):
        """Communicate with the child process without closing stdin.

        Writes *text* plus a newline, then polls the non-blocking stdout
        (sleeping *sleep* seconds between IOError-signalled empty reads)
        until at least some output has been collected, and returns it.
        NOTE(review): this busy-waits forever if the child never writes.
        """
        self.stdin.write(bytes(text + '\n', 'utf-8'))
        self.stdin.flush()
        str_buffer = ''
        while not str_buffer:
            try:
                for bin_str in self.stdout:
                    str_buffer += bin_str.decode('utf-8')
            except IOError:
                # Non-blocking read had no data yet; back off briefly.
                time.sleep(sleep)
        return str_buffer
| [
"frizzlywitch@gmail.com"
] | frizzlywitch@gmail.com |
eb2bae37e9c648b4f4f8701e435601f4d4be96e9 | 0f556b9d4e250df73bf1e0929dbd4afad51e82fe | /smaller_than/smaller_than.py | cb6b4e049621e62ab38344e518e8ebe479383f31 | [] | no_license | unabl4/PythonCodeClub | 0ef1cb4d145860a4fda528c2eea513d0ba6b8327 | 72d5887342c1e0b304307a0e0ac9eb78f0202c35 | refs/heads/master | 2021-04-30T04:42:03.266029 | 2019-02-18T22:09:12 | 2019-02-18T22:09:12 | 121,541,065 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 107 | py | def smaller_than(number_1, number_2):
return None if number_1 == number_2 else min(number_1, number_2)
| [
"unabl4@gmail.com"
] | unabl4@gmail.com |
19838a190c48902a9799ae5a54116786d9d5576b | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2901/58744/247697.py | 300e82a6c695d61e1fd561bfba7acad1b071cf0a | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 289 | py | num = int(input())
def isAlternatingBits(num):
    """Return True when num's binary representation has no two equal adjacent bits.

    Walks the bits from least significant upward, tracking what the next bit
    must NOT equal.
    """
    # Seed with the opposite of the lowest bit so the first comparison passes.
    former_bit = 0 if num & 1 else 1
    while num > 0:
        # BUG FIX: the original wrote ``num & 1 == former_bit``, which Python
        # parses as ``num & (1 == former_bit)`` because == binds tighter than &.
        # That missed repeated 0-bits in some positions (e.g. 17 == 0b10001
        # wrongly returned True).
        if (num & 1) == former_bit:
            return False
        num >>= 1
        former_bit = 0 if former_bit else 1
    return True
print(str(isAlternatingBits(num)).lower())
| [
"1069583789@qq.com"
] | 1069583789@qq.com |
08fbda338ee07b7d2e4bdaddba5d9ca235ebee02 | 251c899a019af13f0355fc005dad499e12464ee2 | /home/views.py | d0000b07c64a63aa5a865639e8735019709b924d | [] | no_license | vinayhegde105/docker_ECR | d7a556f1a6127e857a24b458bce074819b2ecda3 | db8b2614436487806868862759df601a303350eb | refs/heads/main | 2023-08-05T14:46:19.018733 | 2021-09-23T14:52:15 | 2021-09-23T14:52:15 | 409,539,321 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,423 | py | from django.shortcuts import render , redirect
# Create your views here.
from .form import *
from django.contrib.auth import logout
def logout_view(request):
    """Log the current user out and send them to the home page."""
    logout(request)
    return redirect('/')
def home(request):
    """Render the home page with every blog post."""
    context = {'blogs' : BlogModel.objects.all()}
    return render(request , 'home.html' , context)
def login_view(request):
    """Render the login form page."""
    return render(request , 'login.html')
def blog_detail(request , slug):
    """Render a single blog post looked up by its slug (None when absent)."""
    context = {}
    try:
        blog_obj = BlogModel.objects.filter(slug = slug).first()
        context['blog_obj'] = blog_obj
    except Exception as e:
        # Best-effort: log and fall through to render with an empty context.
        print(e)
    return render(request , 'blog_detail.html' , context)
def see_blog(request):
    """Render the list of blog posts authored by the current user."""
    context = {}
    try:
        blog_objs = BlogModel.objects.filter(user = request.user)
        context['blog_objs'] = blog_objs
    except Exception as e:
        # Best-effort: log and render with whatever context we have.
        print(e)
    print(context)
    return render(request , 'see_blog.html' ,context)
def add_blog(request):
    """Show the new-post form (GET) or create a post from it (POST).

    On a valid POST, a BlogModel row is created for the current user from the
    submitted title, content and uploaded image, then the browser is
    redirected back to the empty form.
    """
    context = {'form' : BlogForm}
    try:
        if request.method == 'POST':
            form = BlogForm(request.POST)
            print(request.FILES)
            # NOTE(review): request.FILES['image'] raises KeyError when no
            # file was uploaded; only the broad except below catches it.
            image = request.FILES['image']
            title = request.POST.get('title')
            user = request.user
            if form.is_valid():
                content = form.cleaned_data['content']
                blog_obj = BlogModel.objects.create(
                    user = user , title = title,
                    content = content, image = image
                )
                print(blog_obj)
            return redirect('/add-blog/')
    except Exception as e :
        print(e)
    return render(request , 'add_blog.html' , context)
def blog_update(request , slug):
    """Show the edit form for a post (GET) or save the edits (POST).

    Only the post's author may edit it; anyone else is redirected home.

    BUG FIX: the original POST branch called ``BlogModel.objects.create(...)``,
    which inserted a brand-new post and left the one being edited untouched.
    It now assigns the submitted fields onto the fetched object and calls
    ``save()`` so the existing row is updated in place.
    """
    context = {}
    try:
        blog_obj = BlogModel.objects.get(slug = slug)
        if blog_obj.user != request.user:
            return redirect('/')
        initial_dict = {'content': blog_obj.content}
        form = BlogForm(initial = initial_dict)
        if request.method == 'POST':
            form = BlogForm(request.POST)
            print(request.FILES)
            image = request.FILES['image']
            title = request.POST.get('title')
            if form.is_valid():
                content = form.cleaned_data['content']
                # Update the existing post instead of creating a duplicate.
                blog_obj.title = title
                blog_obj.content = content
                blog_obj.image = image
                blog_obj.save()
        context['blog_obj'] = blog_obj
        context['form'] = form
    except Exception as e :
        print(e)
    return render(request , 'update_blog.html' , context)
def blog_delete(request , id):
    """Delete the post with the given id if it belongs to the current user."""
    try:
        blog_obj = BlogModel.objects.get(id = id)
        # Only the author may delete; others silently fall through.
        if blog_obj.user == request.user:
            blog_obj.delete()
    except Exception as e :
        print(e)
    return redirect('/see-blog/')
def register_view(request):
    """Render the account registration page."""
    return render(request , 'register.html')
def verify(request,token):
    """Confirm an email-verification token.

    Marks the matching Profile verified and sends the user to the login
    page; unknown tokens (or any error) fall through to the home page.
    """
    try:
        profile_obj = Profile.objects.filter(token = token).first()
        if profile_obj:
            profile_obj.is_verified = True
            profile_obj.save()
            return redirect('/login/')
    except Exception as e :
        print(e)
    return redirect('/')
"root@AdminVM.ce2kuzjo5r2utpyd55bak1t0ia.rx.internal.cloudapp.net"
] | root@AdminVM.ce2kuzjo5r2utpyd55bak1t0ia.rx.internal.cloudapp.net |
6b201919d4ba0b553e94078cae06ca3b7bd4d89a | 3b7cc69990b01cc6765435cc513d06f63edea334 | /Programmers/다리를 지나는트럭.py | 57f7ffc00c6f76bc4d8a94aca27ebbeb8d312612 | [] | no_license | naldal/This-is-coding-test | 9d531e6131f40ee8ecf02fc038f5d2c040325e6f | a5170e0780ccb9f62c963ae46e45bebd1f9d288f | refs/heads/master | 2023-01-05T09:39:38.643980 | 2020-11-05T09:07:15 | 2020-11-05T09:07:15 | 293,424,017 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 884 | py | def solution(bridge_length, weight, truck_weights):
answer = 0
for i in range(len(truck_weights)):
print(i)
total_weight = truck_weights[i]
truck_count = 1
reduce = 0
if total_weight < weight:
for j in range(i+1, len(truck_weights)):
total_weight += truck_weights[j]
if total_weight > weight:
total_weight -= truck_weights[j]
break
elif total_weight <= weight:
total_weight += truck_weights[j]
truck_count += 1
reduce += bridge_length
answer += bridge_length + (truck_count - 1) - reduce
answer += 1
print(answer)
return answer
# solution(2, 10, [7, 4, 5, 6])
# solution(100, 100, [10])
solution(100, 100, [10, 10, 10, 10, 10, 10, 10, 10, 10, 10])
| [
"cec7378@gmail.com"
] | cec7378@gmail.com |
49499f173136efc9f5360c976ed035066e6153ff | a48aff43b3b05fedfa4e9262842be0c485a1addf | /python/gamin/racey.py | 270f5505879af934bcbf4ed6cdeb6e97e6ccdded | [] | no_license | Vkeynote/rep3 | 5989d22805efb2c07e038b3027e8eb54adfb6fac | 87789937bd4097cd142c57223e5c656ae2a476f5 | refs/heads/master | 2020-12-30T14:33:37.603440 | 2017-06-22T11:06:07 | 2017-06-22T11:06:07 | 91,320,885 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 547 | py | import pygame
import sys
pygame.init()
display_width = 800
display_height = 600
black = (0,0,0)
white = (255,255,255)
green = (0,255,0)
red = (255,0,0)
blue = (0,0,255)
gameDisplay = pygame.display.set_mode((display_width, display_height))
pygame.display.set_caption('Ray-see')
clock = pygame.time.Clock()
crashed = False
while not crashed:
for event in pygame.event.get():
if event.type == pygame.QUIT:
crashed = True
gameDisplay.fill(green)
pygame.display.update()
clock.tick(60)
pygame.quit()
quit()
| [
"victor kinoti"
] | victor kinoti |
363f86770b28d6cb2e6389bb1e933efbd7d0a25f | e29b380111a9180fb4a5a4b3b227b2a67dbf71b2 | /State.py | 5ba57e77ccc20b5c078696d9a4f7af7e201e1f31 | [] | no_license | ptyshevs/npuzzle | 5af9036537f9125e1689eef856e29aad1fc3fdcf | 05c578aa73a56d9a1a7be642531490e53db5b79a | refs/heads/master | 2020-08-10T05:23:44.598024 | 2019-10-18T10:47:07 | 2019-10-18T10:47:07 | 214,267,611 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,047 | py | class State(list):
def __init__(self, values=None, empty_loc=None):
"""
Matrix is a rectangular table of numerical values
:param values:
"""
self.shape = len(values), len(values[0])
self.h, self.w = self.shape
# self._validate_values(values)
self.values = values
self.pad = 1
if self.shape[0] > 3:
self.pad = 2
if self.shape[0] > 10:
self.pad = 3
self.came_from = None
self.dir_from = None
if empty_loc is None:
self._find_empty()
else:
self.empty_loc = empty_loc
self.g = 0
self.heur = 0
self.hash = hash(str(self))
def _find_empty(self):
found = False
for i in range(self.h):
for j in range(self.w):
if self.values[i][j] == 0:
self.empty_loc = i, j
found = True
if found:
break
if found:
break
def _rehash(self):
self.hash = hash(str(self))
def _validate_values(self, values):
"""
Validate list of lists to be of correct format
:param values:
:return:
"""
prev_len = -1
i = j = -1
if values is None or len(values) == 0:
self.shape = 0, 0
return
for i, row in enumerate(values):
if prev_len == -1:
prev_len = len(row)
if prev_len != len(row):
raise ValueError(f"Row {i} differs in length: {prev_len} != {len(row)}")
for j, val in enumerate(row):
if type(val) not in (int, float, complex):
raise ValueError(f"[{i}, {j}]: {val} is of bad type ({type(val)})")
if val == 0:
self.empty_loc = (i, j)
if i == -1:
self.shape = 0, 0
else:
self.shape = i + 1, j + 1
def __repr__(self):
if self.values:
n = self.pad
return '\n'.join([' '.join([str(c).rjust(n, ' ') for c in row]) for row in self.values])
else:
return str(self.values)
def _swap(self, to):
# self.empty_loc = to
self[self.empty_loc], self[to] = self[to], self[self.empty_loc]
self.empty_loc = to
self._rehash()
def swap(self, dir, inplace=False):
y, x = self.empty_loc
if dir == 'u' and y == 0 or dir == 'd' and y == self.h - 1:
raise ValueError(f"{dir} cannot be performed")
elif (dir == 'l' and x == 0) or (dir == 'r' and x == self.w - 1):
raise ValueError(f"{dir} cannot be performed")
if inplace:
r = self
else:
r = self.copy()
if dir == 'u':
r._swap((y - 1, x))
elif dir == 'r':
r._swap((y, x + 1))
elif dir == 'd':
r._swap((y + 1, x))
else:
r._swap((y, x - 1))
return r
def __getitem__(self, item):
"""
A[key] -- access by indexing
:param item:
:return:
"""
if type(item) is int:
# select row by default
if self.shape[0] == 1: # iterate by column if it's a row vector
return self.values[0][item]
elif self.shape[1] == 1: # iterate by row if it's a column vector
return self.values[item][0]
return Matrix([self.values[item]])
elif type(item) is list:
return Matrix([self.values[i] for i in item])
elif type(item) is tuple and len(item) == 2 and type(item[0]) is int and type(item[1]) is int:
r, c = item
return self.values[r][c]
elif type(item) is slice:
return Matrix(self.values[item])
else:
for i in item:
if type(i) not in (int, slice):
raise ValueError(f"Bad index type {type(i)}")
if len(item) != 2:
raise ValueError(f"Don't understand index: {item}")
if self.shape == (0, 0):
return Matrix([[]])
row_slice, col_slice = item
rows = self.values[row_slice] # M[0, :] to work
if type(rows[0]) is not list:
rows = [rows]
subset = [row[col_slice] for row in rows]
if type(subset) in (int, float, complex):
return Matrix([[subset]])
elif type(subset) in (list, tuple) and type(subset[0]) in (int, float, complex):
return Matrix([subset])
else:
return Matrix(subset)
def __setitem__(self, key, value):
"""
A[key] = value
:param key:
:param value:
:return:
"""
if type(key) is int:
row = key
col = slice(None, None, None)
else:
row, col = key
if type(row) is int:
row_it = range(row, row + 1)
else:
row_it = range(*row.indices(len(self.values)))
for r in row_it:
if type(col) is int and hasattr(value, 'shape') and r < value.shape[1]: # assigning values from Matrix-like object
self.values[r][col] = value[r]
elif type(col) is int and hasattr(value, 'shape') and value.shape == (1, 1):
self.values[r][col] = value[0, 0]
elif type(col) is int:
self.values[r][col] = value
else:
for c in range(*col.indices(len(self.values[0]))):
self.values[r][c] = value[c]
self._rehash()
def copy(self):
""" Return a copy of a state """
return State([r[:] for r in self.values], empty_loc=self.empty_loc)
def __eq__(self, o):
# print(self, o, self.hash, o.hash)
return self.hash == o.hash
def __hash__(self):
return int(self.hash) | [
"ptyshevs@gmail.com"
] | ptyshevs@gmail.com |
b6ebc4882b065f7bde87c7e46e7c9c9b357583dc | ae6adcc4f81e59cc570889f999d6fa4c919e201b | /dataloader/dataLoad.py | effb825572ffba9bd7fa1bd975578b0712541062 | [
"BSD-3-Clause"
] | permissive | Orieus/WeakLabelModel | 82643f54957236f548f7912c075ec975a8b47cfa | 7d51fc719b8d5a715b55587e382f84b87c54378a | refs/heads/master | 2023-04-05T18:51:53.634708 | 2021-09-27T18:51:24 | 2021-09-27T18:51:24 | 62,823,569 | 0 | 1 | null | 2023-03-25T01:13:14 | 2016-07-07T16:55:27 | Jupyter Notebook | UTF-8 | Python | false | false | 1,658 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" This code contains a method to load datasets for testing classification
algorithns
Author: JCS, June, 2016
"""
# External modules
import sklearn.datasets as skd
def getDataset(name):
    """Return the feature matrix X and target array y for a named dataset.

    Args:
        name: dataset tag; one of 'hypercube', 'blobs', 'blobs2', 'iris',
            'digits' or 'covtype'.

    Returns:
        X: input data matrix
        y: target array (0-based class labels)

    Raises:
        ValueError: if *name* is not a recognized tag. (The original fell
            through silently and crashed with UnboundLocalError on return.)
    """
    if name == 'hypercube':
        X, y = skd.make_classification(
            n_samples=400, n_features=40, n_informative=40,
            n_redundant=0, n_repeated=0, n_classes=4,
            n_clusters_per_class=2,
            weights=None, flip_y=0.0001, class_sep=1.0, hypercube=True,
            shift=0.0, scale=1.0, shuffle=True, random_state=None)
    elif name == 'blobs':
        X, y = skd.make_blobs(
            n_samples=400, n_features=2, centers=20, cluster_std=2,
            center_box=(-10.0, 10.0), shuffle=True, random_state=None)
    elif name == 'blobs2':
        X, y = skd.make_blobs(
            n_samples=400, n_features=4, centers=10, cluster_std=1,
            center_box=(-10.0, 10.0), shuffle=True, random_state=None)
    elif name == 'iris':
        dataset = skd.load_iris()
        X = dataset.data
        y = dataset.target
    elif name == 'digits':
        dataset = skd.load_digits()
        X = dataset.data
        y = dataset.target
    elif name == 'covtype':
        dataset = skd.fetch_covtype()
        X = dataset.data
        # covtype labels run 1..7; shift to 0-based like the other datasets.
        y = dataset.target - 1
    else:
        raise ValueError("Unknown dataset name: {!r}".format(name))
    return X, y
| [
"jcid@tsc.uc3m.es"
] | jcid@tsc.uc3m.es |
7721212f0127819872a18fcc4a779267ff52d0b7 | c9dc5de35b590f748ab29d89b572f270bbff1f45 | /setup.py | c2e112464c6f69152594161f7322e1c272358c47 | [] | no_license | spmacdonald/craigslist | 198600c2bbff456dc23eb47e10eb45f0f4ece3fa | ebf13276b9ea59f465a6fd722b54ca66494f1998 | refs/heads/master | 2016-09-09T17:02:17.308681 | 2013-01-16T09:02:52 | 2013-01-16T09:02:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 517 | py | import os
from setuptools import setup
def read(fname):
    """Return the text of *fname*, resolved relative to this file's directory.

    Uses a context manager so the file handle is closed deterministically
    (the original relied on the garbage collector to close it).
    """
    with open(os.path.join(os.path.dirname(__file__), fname)) as f:
        return f.read()
# Package metadata; long_description is pulled from the README file.
setup(
    name='craigslist',
    description='A Python API for searching Craigslist.',
    long_description=read('README'),
    author='Andrew Brookins',
    author_email='a@andrewbrookins.com',
    url='https://github.com/abrookins/craigslist',
    version='0.11',
    packages=['craigslist'],
    # Pinned runtime dependencies.
    install_requires=[
        'BeautifulSoup==3.2.0',
        'requests==0.10.1'
    ]
)
| [
"a.m.brookins@gmail.com"
] | a.m.brookins@gmail.com |
45ee16d54b3d9fc6b576564b858c3f81e65f61ac | 8f589b4ce5daa41325dd34af7f55b79d4363fff4 | /rest_api_project/wsgi.py | 293555757e895f29980343018f5ac48a41d000ff | [] | no_license | manoliskorniotakis/geo_rest_api | 506ba9413c1a6e33bc90cc8b6961df7220226edb | 2f2318a5592ccfd4c3cd43c33afa61758d3a9416 | refs/heads/master | 2020-03-23T00:57:10.311380 | 2018-07-13T21:37:50 | 2018-07-13T21:37:50 | 140,893,372 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 410 | py | """
WSGI config for rest_api_project project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "rest_api_project.settings")
application = get_wsgi_application()
| [
"manolis@Jessie.debian.org"
] | manolis@Jessie.debian.org |
273d5e6c1723e75812580b3ef222fe5e49de355b | 07a460e0827fe2e7326276d78216b64fcc0cbf43 | /CSCI_1100/Week_7/Lab_6/lab6Files/check0.py | 2145e0c6f2f70146626a257b55ddae97c202b6ff | [] | no_license | bobmayuze/RPI_Education_Material | 2b1a122b70ec420f9a63fe1e26c47ad2d506cd9b | 202a25239f4c50529012b0c9163ca059bf7fc82b | refs/heads/master | 2020-12-01T21:01:09.059103 | 2017-02-17T18:29:37 | 2017-02-17T18:29:37 | 67,041,284 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 443 | py | string = ''
for i in range(0,9):
string += str(i)+' '
print(string)
rows = 9
cols = 9
grid = []
for y in range(rows):
grid.append([0]*cols)
for y in range(rows):
for x in range(cols):
grid[y][x] = "{},{}".format(y,x)
# print("{},{}".format(y,x))
s = ""
for i in range(len(grid)):
for p in range(len(grid[i])):
s += "{:4}".format(grid[i][p])
if p == 2 or p ==5:
s += " "
if i ==2 or i == 5:
s += "\n"
print(s)
s = ""
| [
"584653629@qq.com"
] | 584653629@qq.com |
79a8e4c562139987c47fe34f81f4bc9c48703f36 | 3db7b5409f2f9c57ab3f98bda50f8b548d98063d | /samples/tests/test_model_samples.py | ed82dd678c2f104779586f523aeefb3e7b00a9f1 | [
"Apache-2.0"
] | permissive | googleapis/python-bigquery | 66db156b52e97565f6211b2fab5aac4e519fa798 | 3645e32aeebefe9d5a4bc71a6513942741f0f196 | refs/heads/main | 2023-09-01T07:41:24.893598 | 2023-08-23T19:04:13 | 2023-08-23T19:04:13 | 226,992,475 | 622 | 287 | Apache-2.0 | 2023-09-12T04:31:26 | 2019-12-10T00:09:04 | Python | UTF-8 | Python | false | false | 1,507 | py | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import typing
from .. import delete_model
from .. import get_model
from .. import list_models
from .. import update_model
if typing.TYPE_CHECKING:
import pytest
def test_model_samples(
    capsys: "pytest.CaptureFixture[str]", dataset_id: str, model_id: str
) -> None:
    """Since creating a model is a long operation, test all model samples in
    the same test, following a typical end-to-end flow.

    Order matters: get -> list -> update -> delete, asserting on the stdout
    each sample prints (captured via capsys).
    """
    get_model.get_model(model_id)
    out, err = capsys.readouterr()
    assert model_id in out

    list_models.list_models(dataset_id)
    out, err = capsys.readouterr()
    assert "Models contained in '{}':".format(dataset_id) in out

    update_model.update_model(model_id)
    out, err = capsys.readouterr()
    assert "This model was modified from a Python program." in out

    # Deleting last leaves the project clean even if earlier asserts pass.
    delete_model.delete_model(model_id)
    out, err = capsys.readouterr()
    assert "Deleted model '{}'.".format(model_id) in out
| [
"noreply@github.com"
] | noreply@github.com |
f29914a515f74f76b34be9f8770ba18e37935362 | 401d9e394626c143eecbe2079b6a94fa4c896c05 | /crawler/fdThreads/fd_to_thread.py | 7b464a19b005d18ed820c5a144e67eb622c4fbc8 | [] | no_license | paldosh/perceive | f94dc0e05814bb7825a4fa180829eeb54bb4df3a | 2d67cdec04970446057c7da4532ac0f5192b4fa4 | refs/heads/master | 2020-12-28T23:36:08.667215 | 2017-01-20T04:51:12 | 2017-01-20T04:51:12 | 66,851,770 | 0 | 0 | null | 2016-09-01T00:35:36 | 2016-08-29T14:36:56 | null | UTF-8 | Python | false | false | 899 | py | #script to get the thread FD data in the format required for Running LDA
import csv
import sys
def get_data(f):
try:
f2= open(f)
d=f2.read().strip()
f2.close()
except IOError:
print "error"
d=""
return d
if __name__ == "__main__":
if len(sys.argv) != 2:
print "Usage: <Name of the file>"
exit(0)
fname=sys.argv[1]
j=0
with open(fname) as f:
reader = csv.DictReader(f)
for row in reader:
ids=row['threadtId'].strip()
reply_id=row['reply_IDs'].strip()
reply_ids=reply_id.split(",")
i=".".join((ids,"txt"))
text=get_data(i)
jf="_".join(("thread",str(j)))
jfile=".".join((jf,"txt"))
fn= open(jfile,'wb')
for r in reply_ids:
f1=".".join((r,"txt"))
if f1==".txt":
continue
else:
t=get_data(f1)
#print t
text="".join((text,t))
#print text
fn.write(text)
j=j+1 | [
"noreply@github.com"
] | noreply@github.com |
49850af963a65360d1c97ece208d22db0d8b0796 | ef4f8e5505077799eef1e333c681eba2f5b91eb7 | /proyectoLeVents/manage.py | 193ee00e952eab2fa446208afba8a2d54ff2036e | [] | no_license | DonRata/Le-Vents | 9b9a44e7a25972f7ff32ac1b83e9766d505b2d90 | 7b668565732f218b6e5bfd2bc002dfd11b45a060 | refs/heads/master | 2021-06-22T19:11:18.093227 | 2019-12-02T14:16:14 | 2019-12-02T14:16:14 | 215,043,028 | 0 | 0 | null | 2021-06-10T22:12:40 | 2019-10-14T12:50:52 | Python | UTF-8 | Python | false | false | 627 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Levents.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"emilio.r.cruz@gmail.com"
] | emilio.r.cruz@gmail.com |
ca418afeb7226b1ed8850883ad982e55f778e9c6 | b6832369607b214a3b491262f135b818e242e6c3 | /catalog/urls.py | 6c9bf7190243e6befa7456b45a60becd2e69c53c | [] | no_license | khazraer/Test | a55b103c462450e3b05f46b01d6746b800aa0971 | 17da0270978abd6a72d4058b41e03b8110d35984 | refs/heads/master | 2020-07-26T02:23:50.649017 | 2019-09-14T22:17:59 | 2019-09-14T22:17:59 | 208,503,156 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 258 | py |
from django.urls import path
from catalog import views
urlpatterns = [
path('', views.index, name='index'),
path('book/',views.BookListView.as_view(),name = 'books'),
path('book/<int:pk>',views.BookDetailView.as_view(),name = 'book-detail')
]
| [
"raedkhazeem@Raeds-MacBook-Pro.local"
] | raedkhazeem@Raeds-MacBook-Pro.local |
fa28e044a716da27bba63669641a6930120d6137 | 2a538cffa543855a2c0c1b4485a0455b9673efdd | /users/migrations/0005_remove_profile_name.py | 3b9e8eeaf8d21b482426080b9df7b6ca1296226e | [] | no_license | FLIQ1-0/FLIQ1.0 | dbf0ae92cf613d9a9813122998934e4ceedc9790 | 97109d62a89b9184052c349b09d28b097c1bdcf6 | refs/heads/main | 2023-06-30T01:39:28.560442 | 2021-07-30T07:15:06 | 2021-07-30T07:15:06 | 390,750,477 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 342 | py | # Generated by Django 3.0.8 on 2020-08-13 14:24
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('users', '0004_auto_20200812_0907'),
]
operations = [
migrations.RemoveField(
model_name='profile',
name='name',
),
]
| [
"noreply@github.com"
] | noreply@github.com |
42226fdcd5ceb2549588906b5c91eab8af344a0f | cbc9b9966f4dd25d9e77f1b85b4179ee372a3eea | /tests/utils.py | 720371037fa93e53103cafa2b25bfa8951edccc7 | [
"Apache-2.0"
] | permissive | uber/uber-poet | f58b8f264f8e2f4c488104f41d3146fca380f648 | 580a52f428014861847ea59bb838ffb4e060b84d | refs/heads/master | 2023-09-03T16:04:26.824597 | 2021-06-16T22:21:39 | 2021-06-16T22:21:39 | 156,401,298 | 95 | 14 | Apache-2.0 | 2022-01-12T04:01:10 | 2018-11-06T15:04:46 | Python | UTF-8 | Python | false | false | 916 | py | # Copyright (c) 2018 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
def do_nothing(_):
pass
def integration_test(func):
if 'INTEGRATION' in os.environ:
return func
else:
return do_nothing
def read_file(path):
with open(path, 'r') as f:
return f.read()
def write_file(path, text):
with open(path, 'w') as f:
f.write(text)
| [
"mahyar@uber.com"
] | mahyar@uber.com |
611a50956c59cb945db54ff980d7e159427332e7 | da0fd3464feae5e77469b2ff2e727388fb806335 | /btre/urls.py | 00804af15a0f5a9e3c691e5b885194cb426ea548 | [] | no_license | mim-omid/Django_RealEstate | d02ee8c829138fb1dc5916621b2ad1db97456a48 | 17cefeb502594274ed505474e03d54aca4930399 | refs/heads/master | 2020-05-22T14:42:40.226353 | 2019-05-18T11:10:43 | 2019-05-18T11:10:43 | 186,393,151 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 890 | py | """btre URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.urls import include
urlpatterns = [
path('',include('pages.urls')),
path('listings/',include('listings.urls')),
path('admin/', admin.site.urls),
]
| [
"mr.mim.omid@gmail.com"
] | mr.mim.omid@gmail.com |
56531f41048eb1678833782d2cbcab5c2079f65c | e02ae56bdd80ad660c3fe455641e74b5ae8ba765 | /FluentPython/unit17/demo_executor_map.py | 150bb87106ed622508c340713fcd8082e980702f | [] | no_license | RiderLai/pytest | aa1f7115186c274856996fa3c9cce2e0982fabc2 | e939398353efe624d4808d0024a85762a235ca9a | refs/heads/master | 2020-04-14T01:16:59.678025 | 2020-02-12T12:02:49 | 2020-02-12T12:02:49 | 163,556,588 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 707 | py | from time import sleep, strftime
from concurrent import futures
def display(*args):
print(strftime('[%H:%M:%S]'), end=' ')
print(*args)
def loiter(n):
msg = '{}loiter({}): doing nothing for {}s...'
display(msg.format('\t'*n, n, n))
sleep(n)
msg = '{}loiter({}): done.'
display(msg.format('\t'*n, n))
return n * 10
def main():
display('Script starting.')
executor = futures.ThreadPoolExecutor(max_workers=3)
results = executor.map(loiter, range(5))
display('results:', results)
display('Waiting for individual results:')
for i, result in enumerate(results):
display('result {}: {}'.format(i, result))
display('done bye bye')
main() | [
"lyf670354671@gmail.com"
] | lyf670354671@gmail.com |
a43a6ca183fe13cab45ff1ffe654cb22df55bdd3 | b3f6daa5d6c987eb8a61d5fe125bf2a98997e259 | /8kyu/Simple multiplication/index.py | 0853411208f8f60cc3ab604295bcd6f49ea44358 | [] | no_license | krnets/codewars-practice | 53a0a6c9d2d8c2b94d6799a12f48dd588179a5ce | 5f8e1cc1aebd900b9e5a276884419fc3e1ddef24 | refs/heads/master | 2022-12-20T19:33:43.337581 | 2022-12-16T05:32:39 | 2022-12-16T05:32:39 | 217,464,785 | 1 | 0 | null | 2020-07-20T08:36:31 | 2019-10-25T06:20:41 | JavaScript | UTF-8 | Python | false | false | 668 | py | # 8kyu - Simple multiplication
""" This kata is about multiplying a given number by eight if it is an even number and by nine otherwise. """
# def simple_multiplication(number):
# return 8 * number if number % 2 == 0 else 9 * number
# def simple_multiplication(number):
# return number * (8 if number % 2 == 0 else 9)
# def simple_multiplication(number):
# return number * [8, 9][number % 2]
def simple_multiplication(number):
return number * (8 + number % 2)
q = simple_multiplication(2) # 16
q
q = simple_multiplication(1) # 9
q
q = simple_multiplication(8) # 64
q
q = simple_multiplication(4) # 32
q
q = simple_multiplication(5) # 45
q
| [
"cmantheo@gmail.com"
] | cmantheo@gmail.com |
eb484998751392468bf30d8a2fb2c3ba3ede7764 | 318d26f99fc8e61b9da4669edb72f400b7fd26d3 | /venv/Scripts/pycolor-script.py | f3417a87f6dafedb62f79b400dd596547c1db112 | [] | no_license | lsferreira934/Exercicios-em-Python | 880a8221a00de43a429c216153be6cba043c8369 | 7f59f8c1dafdd23a68189bf9123c818e511d2858 | refs/heads/master | 2020-12-22T17:19:03.982669 | 2020-01-29T00:42:52 | 2020-01-29T00:42:52 | 236,871,652 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 422 | py | #!C:\Users\Leandro\PycharmProject\CursoemVideo\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'color==0.1','console_scripts','pycolor'
__requires__ = 'color==0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('color==0.1', 'console_scripts', 'pycolor')()
)
| [
"lsfereira934@yahoo.com.br"
] | lsfereira934@yahoo.com.br |
4d67978e52e4e83c73dd638f7a3dce705eb9f725 | 7ea3fd8b778fa47b9e7f2c91b094a50a7dd34cf2 | /Ch. 8 Exercise 5.py | ff97daa2a931cafc0c1528f65b68b017cd2be51d | [] | no_license | jenjade/class-work | 7c4ee0d215fa3a16806c7d6259f06d84c579f146 | af0d1281cdd64898afebcf87220a22ee50aa2569 | refs/heads/master | 2020-03-16T21:16:24.754108 | 2018-05-11T05:31:37 | 2018-05-11T05:31:37 | 132,992,066 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 311 | py | fhand = open('mbox-short.txt')
count = 0
search_word = 'From'
for line in fhand:
dict()
words = line.split()
#print ('Debug:', words)
if len(words) == 0 : continue
if words[0] != 'From' : continue
print(words[2])
count += 1
print ('The word From occurs', count, 'times.')
| [
"jenjade@uw.edu"
] | jenjade@uw.edu |
c4629c6296276f6dd000ac6acc97097972160f92 | 4755dabdcff6a45b9c15bf9ea814c6b8037874bd | /build/laser_proc/catkin_generated/pkg.installspace.context.pc.py | 8aa2d2e231584bb4c6aa2e425d2a5cc3e336be50 | [] | no_license | Rallstad/RobotSnake | 676a97bdfde0699736d613e73d539929a0c2b492 | 37ee6d5af0458b855acf7c2b83e0ee17833dbfd1 | refs/heads/master | 2023-01-03T05:46:46.268422 | 2018-05-27T16:01:47 | 2018-05-27T16:01:47 | 308,665,980 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 698 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/snake/Documents/catkin_ws/install/include".split(';') if "/home/snake/Documents/catkin_ws/install/include" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp;sensor_msgs;rosconsole;nodelet".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-llaser_proc_library;-llaser_publisher;-llaser_transport;-llaser_proc_ROS;-lLaserProcNodelet".split(';') if "-llaser_proc_library;-llaser_publisher;-llaser_transport;-llaser_proc_ROS;-lLaserProcNodelet" != "" else []
PROJECT_NAME = "laser_proc"
PROJECT_SPACE_DIR = "/home/snake/Documents/catkin_ws/install"
PROJECT_VERSION = "0.1.4"
| [
"vetle.fredriksen@gmail.com"
] | vetle.fredriksen@gmail.com |
5733282b347437017239c5826413bf88554b2717 | 2d8573859614560cd550df021cf46901803dd18a | /merdecoin-v0.01/test/functional/feature_reindex.py | 123ee0a7d555cb17a50dc57592b3ad2ba3ad49e7 | [
"MIT"
] | permissive | merde-libertas/merdecoin | d0cd34a84e8affbf42b99c8acd1cc692a1767b26 | 35f8a54db8d322d0bd8534822f0e88aab274e539 | refs/heads/master | 2023-05-26T20:43:23.516846 | 2021-06-10T10:24:07 | 2021-06-10T10:24:07 | 369,151,527 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,422 | py | #!/usr/bin/env python3
# Copyright (c) 2014-2018 The Merdecoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test running merdecoind with -reindex and -reindex-chainstate options.
- Start a single node and generate 3 blocks.
- Stop the node and restart it with -reindex. Verify that the node has reindexed up to block 3.
- Stop the node and restart it with -reindex-chainstate. Verify that the node has reindexed up to block 3.
"""
from test_framework.test_framework import MerdecoinTestFramework
from test_framework.util import wait_until
class ReindexTest(MerdecoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def reindex(self, justchainstate=False):
self.nodes[0].generatetoaddress(3, self.nodes[0].get_deterministic_priv_key().address)
blockcount = self.nodes[0].getblockcount()
self.stop_nodes()
extra_args = [["-reindex-chainstate" if justchainstate else "-reindex"]]
self.start_nodes(extra_args)
wait_until(lambda: self.nodes[0].getblockcount() == blockcount)
self.log.info("Success")
def run_test(self):
self.reindex(False)
self.reindex(True)
self.reindex(False)
self.reindex(True)
if __name__ == '__main__':
ReindexTest().main()
| [
"phillipwilkinson@hotmail.com"
] | phillipwilkinson@hotmail.com |
879913c4b70bee4502b035d12d1c323881a4572e | 087594210fbc0c8d417368d9ea347748eba77206 | /graphical pass point/cryptography.py | 3b5dbae3665e91c0db7b85fbe2a39f0ab871ee22 | [] | no_license | hazelclement/python-programs | f663055a3f62a96684f31e25dfd47903cb0feca6 | 055c5f8922126119bc8e74d6c0f17569d280c23a | refs/heads/master | 2022-12-01T16:05:09.048514 | 2020-07-21T17:46:01 | 2020-07-21T17:46:01 | 281,458,353 | 0 | 0 | null | 2020-07-21T17:46:03 | 2020-07-21T17:11:41 | Python | UTF-8 | Python | false | false | 241 | py |
message = 'This is program to explain reverse cipher.'
translated = '' #cipher text is stored in this variable
i = len(message) - 1
while i >= 0:
translated = translated + message[i]
i = i - 1
print("The cipher text is ", translated) | [
"hazelclement15@gmail.com"
] | hazelclement15@gmail.com |
14895c387790963bdf7116b1e8c8c4f7091456ff | bd61ecbc7696eb499b0658c5d636e93bf2f94844 | /tutorials/W08_AutoEncoders_GANs/solutions/W8_Tutorial1_Solution_Ex05.py | db284bd51b1651e82d322ddcabf0f27c929a1b38 | [] | no_license | fyshelab/course-content | a8587f23a8fcfd692694cfddcbf5879a6cbbc2ec | 353d738d76a352a09e058569d6692d5944a3975a | refs/heads/main | 2023-04-19T19:40:01.129935 | 2021-05-14T19:56:19 | 2021-05-14T19:56:19 | 364,661,485 | 1 | 1 | null | 2021-05-05T17:50:57 | 2021-05-05T17:50:56 | null | UTF-8 | Python | false | false | 4,138 | py | class ConvVAE(nn.Module):
def __init__(self, K, num_filters=32, filter_size=5):
super(ConvVAE, self).__init__()
# With padding=0, the number of pixels cut off from each image dimension
# is filter_size // 2. Double it to get the amount of pixels lost in
# width and height per Conv2D layer, or added back in per
# ConvTranspose2D layer.
filter_reduction = 2 * (filter_size // 2)
# After passing input through two Conv2d layers, the shape will be
# 'shape_after_conv'. This is also the shape that will go into the first
# deconvolution layer in the decoder
self.shape_after_conv = (num_filters,
my_dataset_size[1]-2*filter_reduction,
my_dataset_size[2]-2*filter_reduction)
flat_size_after_conv = self.shape_after_conv[0] \
* self.shape_after_conv[1] \
* self.shape_after_conv[2]
# Define the recognition model (encoder or q) part
self.q_bias = BiasLayer(my_dataset_size)
self.q_conv_1 = nn.Conv2d(my_dataset_size[0], num_filters, 5)
self.q_conv_2 = nn.Conv2d(num_filters, num_filters, 5)
self.q_flatten = nn.Flatten()
self.q_fc_phi = nn.Linear(flat_size_after_conv, K+1)
# Define the generative model (decoder or p) part
self.p_fc_upsample = nn.Linear(K, flat_size_after_conv)
self.p_unflatten = nn.Unflatten(-1, self.shape_after_conv)
self.p_deconv_1 = nn.ConvTranspose2d(num_filters, num_filters, 5)
self.p_deconv_2 = nn.ConvTranspose2d(num_filters, my_dataset_size[0], 5)
self.p_bias = BiasLayer(my_dataset_size)
# Define a special extra parameter to learn scalar sig_x for all pixels
self.log_sig_x = nn.Parameter(torch.zeros(()))
def infer(self, x):
"""Map (batch of) x to (batch of) phi which can then be passed to
rsample to get z
"""
s = self.q_bias(x)
s = F.relu(self.q_conv_1(s))
s = F.relu(self.q_conv_2(s))
flat_s = s.view(s.size()[0], -1)
phi = self.q_fc_phi(flat_s)
return phi
def generate(self, zs):
"""Map [b,n,k] sized samples of z to [b,n,p] sized images
"""
# Note that for the purposes of passing through the generator, we need
# to reshape zs to be size [b*n,k]
b, n, k = zs.size()
s = zs.view(b*n, -1)
s = F.relu(self.p_fc_upsample(s)).view((b*n,) + self.shape_after_conv)
s = F.relu(self.p_deconv_1(s))
s = self.p_deconv_2(s)
s = self.p_bias(s)
mu_xs = s.view(b, n, -1)
return mu_xs
def forward(self, x):
# VAE.forward() is not used for training, but we'll treat it like a
# classic autoencoder by taking a single sample of z ~ q
phi = self.infer(x)
zs = rsample(phi, 1)
return self.generate(zs).view(x.size())
def elbo(self, x, n=1):
"""Run input end to end through the VAE and compute the ELBO using n
samples of z
"""
phi = self.infer(x)
zs = rsample(phi, n)
mu_xs = self.generate(zs)
return log_p_x(x, mu_xs, self.log_sig_x.exp()) - kl_q_p(zs, phi)
def train_vae(vae, dataset, epochs=10, n_samples=16):
opt = torch.optim.Adam(vae.parameters(), lr=0.001, weight_decay=1e-6)
elbo_vals = []
vae.to(DEVICE)
vae.train()
loader = DataLoader(dataset, batch_size=100, shuffle=True, pin_memory=True)
for epoch in trange(epochs, desc='Epochs'):
for im, _ in tqdm(loader, total=len(dataset)//100, desc='Batches', leave=False):
im = im.to(DEVICE)
opt.zero_grad()
loss = -vae.elbo(im)
loss.backward()
opt.step()
elbo_vals.append(-loss.item())
vae.to('cpu')
vae.eval()
return elbo_vals
vae = ConvVAE(K=K)
elbo_vals = train_vae(vae, my_dataset, n_samples=10)
print(f'Learned sigma_x is {torch.exp(vae.log_sig_x)}')
plt.figure()
plt.plot(elbo_vals)
plt.xlabel('Batch #')
plt.ylabel('ELBO')
plt.show()
| [
"noreply@github.com"
] | noreply@github.com |
1337748aee1def7513208e92e38858d34f0ed411 | 50fcaacdb31ae4083749266286f2ac6efcaef97a | /backend/base/products.py | 3cf6d7f8fa418fd6335f7a4a4835474b925d4abe | [] | no_license | TannerGS/react-django-ecommerce | cfc587a3fe5941cbc4eedeaf291bd05564329231 | a44242a28229dc1574d1293416f1feb213e70745 | refs/heads/master | 2023-05-05T06:51:33.117588 | 2021-05-25T18:25:55 | 2021-05-25T18:25:55 | 364,300,782 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,735 | py | products = [
{
'_id': '1',
'name': 'Airpods Wireless Bluetooth Headphones',
'image': '/images/airpods.jpg',
'description':
'Bluetooth technology lets you connect it with compatible devices wirelessly High-quality AAC audio offers immersive listening experience Built-in microphone allows you to take calls while working',
'brand': 'Apple',
'category': 'Electronics',
'price': 89.99,
'countInStock': 10,
'rating': 4.5,
'numReviews': 12,
},
{
'_id': '2',
'name': 'iPhone 11 Pro 256GB Memory',
'image': '/images/phone.jpg',
'description':
'Introducing the iPhone 11 Pro. A transformative triple-camera system that adds tons of capability without complexity. An unprecedented leap in battery life',
'brand': 'Apple',
'category': 'Electronics',
'price': 599.99,
'countInStock': 0,
'rating': 4.0,
'numReviews': 8,
},
{
'_id': '3',
'name': 'Cannon EOS 80D DSLR Camera',
'image': '/images/camera.jpg',
'description':
'Characterized by versatile imaging specs, the Canon EOS 80D further clarifies itself using a pair of robust focusing systems and an intuitive design',
'brand': 'Cannon',
'category': 'Electronics',
'price': 929.99,
'countInStock': 5,
'rating': 3,
'numReviews': 12,
},
{
'_id': '4',
'name': 'Sony Playstation 4 Pro White Version',
'image': '/images/playstation.jpg',
'description':
'The ultimate home entertainment center starts with PlayStation. Whether you are into gaming, HD movies, television, music',
'brand': 'Sony',
'category': 'Electronics',
'price': 399.99,
'countInStock': 11,
'rating': 5,
'numReviews': 12,
},
{
'_id': '5',
'name': 'Logitech G-Series Gaming Mouse',
'image': '/images/mouse.jpg',
'description':
'Get a better handle on your games with this Logitech LIGHTSYNC gaming mouse. The six programmable buttons allow customization for a smooth playing experience',
'brand': 'Logitech',
'category': 'Electronics',
'price': 49.99,
'countInStock': 7,
'rating': 3.5,
'numReviews': 10,
},
{
'_id': '6',
'name': 'Amazon Echo Dot 3rd Generation',
'image': '/images/alexa.jpg',
'description':
'Meet Echo Dot - Our most popular smart speaker with a fabric design. It is our most compact smart speaker that fits perfectly into small space',
'brand': 'Amazon',
'category': 'Electronics',
'price': 29.99,
'countInStock': 0,
'rating': 4,
'numReviews': 12,
},
]
| [
"tannersimpkins@gmail.com"
] | tannersimpkins@gmail.com |
20f8fb5a190e77a3ff5fccec0605de447e4a5d3d | cd612a871df90cc010f7836cbdaf40afba401016 | /blog/myblog/migrations/0003_auto_20170422_2009.py | a9145bd8176b513289b7db8152aff89e764a1048 | [] | no_license | nicexp/myblog | 64db95bb06b52d09bae4322f5ba9bdf44cd4781a | 1856d54cb7f302501c425febe0f5a30a89697d60 | refs/heads/master | 2021-01-20T14:59:51.313194 | 2017-05-09T04:42:56 | 2017-05-09T04:42:56 | 90,703,324 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 460 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-04-22 12:09
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('myblog', '0002_auto_20170422_2005'),
]
operations = [
migrations.AlterField(
model_name='blog',
name='img_path',
field=models.FilePathField(blank=True, null=True),
),
]
| [
"419797567@qq.com"
] | 419797567@qq.com |
bf1fd77267d8c4bca07a45429c04b6b67db22867 | 0970a4aab1070bdb28b799fab2cf61693c4ccab2 | /src/tf-idf.py | 120bfc4d0f4f88914b5d27c3c389f38a84926175 | [] | no_license | nickzhangf/machine-learning | ca53842ab0a07e7e6df8317e1ecc09e3dd0fc958 | a2376319c3a0b1fac315153f72f780f0402b7271 | refs/heads/master | 2020-12-29T02:39:02.406050 | 2017-02-13T02:33:34 | 2017-02-13T02:33:34 | 52,136,530 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,589 | py | # coding:utf-8
__author__ = "liuxuejiang"
import jieba
import jieba.posseg as pseg
import os
import sys
from sklearn import feature_extraction
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer
if __name__ == "__main__":
corpus = ["我 来到 北京 清华大学", # 第一类文本切词后的结果,词之间以空格隔开
"他 来到 了 网易 杭研 大厦", # 第二类文本的切词结果
"小明 硕士 毕业 与 中国 科学院", # 第三类文本的切词结果
"我 爱 北京 天安门"] # 第四类文本的切词结果
vectorizer = CountVectorizer() # 该类会将文本中的词语转换为词频矩阵,矩阵元素a[i][j] 表示j词在i类文本下的词频
transformer = TfidfTransformer() # 该类会统计每个词语的tf-idf权值
tfidf = transformer.fit_transform(
vectorizer.fit_transform(corpus)) # 第一个fit_transform是计算tf-idf,第二个fit_transform是将文本转为词频矩阵
word = vectorizer.get_feature_names() # 获取词袋模型中的所有词语
weight = tfidf.toarray() # 将tf-idf矩阵抽取出来,元素a[i][j]表示j词在i类文本中的tf-idf权重
for i in range(len(weight)): # 打印每类文本的tf-idf词语权重,第一个for遍历所有文本,第二个for便利某一类文本下的词语权重
print u"-------这里输出第", i, u"类文本的词语tf-idf权重------"
for j in range(len(word)):
print word[j], weight[i][j]
| [
"fengz2008@163.com"
] | fengz2008@163.com |
0d9c589064bdfa802bbc69912c2b119c8b1a3167 | 5b3d8b5c612c802fd846de63f86b57652d33f672 | /Python/seven_kyu/to_jaden_case.py | 6f1011c1120d950fcc87a4462cab4f25505b6208 | [
"Apache-2.0"
] | permissive | Brokenshire/codewars-projects | 1e591b57ed910a567f6c0423beb194fa7f8f693e | db9cd09618b8a7085b0d53ad76f73f9e249b9396 | refs/heads/master | 2021-07-22T18:50:25.847592 | 2021-01-25T23:27:17 | 2021-01-25T23:27:17 | 228,114,677 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 942 | py | # Python solution for 'Jaden Casing Strings' codewars question.
# Level: 7 kyu
# Tags: Fundamentals, Strings, and Arrays.
# Author: Jack Brokenshire
# Date: 17/02/2020
import unittest
def to_jaden_case(string):
"""
Your task is to convert strings to how they would be written by Jaden Smith. The strings are actual quotes from
Jaden Smith, but they are not capitalized in the same way he originally typed them.
:param string: A string value input.
:return: A new string with each word in the sentence capitalized.
"""
return " ".join(x.capitalize() for x in string.split())
class TestToJadenCase(unittest.TestCase):
"""Class to test 'to_jaden_case' function"""
def test_name_list(self):
quote = "How can mirrors be real if our eyes aren't real"
self.assertEqual(to_jaden_case(quote), "How Can Mirrors Be Real If Our Eyes Aren't Real")
if __name__ == '__main__':
unittest.main()
| [
"29889878+Brokenshire@users.noreply.github.com"
] | 29889878+Brokenshire@users.noreply.github.com |
11e9e11dc871aea4258733931469fe48088b6a3c | 91d77c5cde6a83457b5b0b7baf9f4d04398c7f2b | /mysite/settings.py | 20ca551958cd2bd9b987d00991a4b64623910dad | [] | no_license | niksamoyloff/blog | 727835a9bb47dcd7766ca4ad6cc684e02c738ff5 | 27f4a0f891f9a480100d4507dd65065f152f6560 | refs/heads/master | 2021-06-09T22:52:17.619888 | 2017-01-09T11:24:08 | 2017-01-09T11:24:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,532 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.10.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ki2t_tcxer5h$j211^bt=!96+du0+apk4uxxghde9j%0k3&a*%'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'djangogirls',
'USER': 'postgres',
'PASSWORD': '12345',
'HOST': 'localhost',
'PORT': '5432',
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Moscow'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
LOGIN_REDIRECT_URL = '/'
import dj_database_url
DATABASES['default'] = dj_database_url.config()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
ALLOWED_HOSTS = ['*']
DEBUG = False
try:
from .local_settings import *
except ImportError:
pass | [
"niksamoyloff@gmail.com"
] | niksamoyloff@gmail.com |
d807a7d1a649fac018c6da8614952df89a7cdc5e | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_snowboard.py | 5100be7c8c861988ab39e3be570cce2fce7b2eba | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 347 | py |
#calss header
class _SNOWBOARD():
def __init__(self,):
self.name = "SNOWBOARD"
self.definitions = [u'to slide on the snow by standing on a specially shaped board: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'verbs'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
e8d69dc71a1291dee1d8938c549171c4d1c602aa | e58307f680fb8f975b7c47b0dda98efed2e0675f | /main.py | cc000d07d16a2bd8c8c0d075c5c6308c7361631d | [] | no_license | yeahdef/albumRipIdentify | af49fc83cf152c18e3d65ac6a35951e089322b9c | 266b043fb981e53cc7d26e6f2b382a71b0ddba81 | refs/heads/master | 2020-03-16T00:02:51.928628 | 2018-05-07T04:28:15 | 2018-05-07T04:28:15 | 132,406,714 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,589 | py | from mutagen.easyid3 import EasyID3
from dateutil.parser import parse
from mutagen.mp3 import MP3
from time import sleep
from tqdm import tqdm
import discogs_client
import argparse
import mutagen
import difflib
import fnmatch
import pickle
import sys
import os
# Command-line interface. --filename is the recording's base name WITHOUT the
# .mp3 extension (splitRecording appends it); --artist/--title are accepted
# but currently unused hints.
parser = argparse.ArgumentParser(description='Slice mp3 recording into tracks, edit ID3 via discogs collection lookup, and add to iTunes.')
parser.add_argument('--artist', type=str, help='Artist Name')
parser.add_argument('--title', type=str, help='Release Title')
parser.add_argument('--filename', type=str, help='Recording File Name', required=True)
args = parser.parse_args()  # parsed at import time; exits with usage if --filename is missing
# TODO: Create classes for these objects
class Recording:
    """Placeholder for a future Recording abstraction (not yet implemented)."""
    pass
class Collection:
    """Placeholder for a future Collection abstraction (not yet implemented)."""
    pass
class CollectionList:
    """Placeholder for a future CollectionList abstraction (not yet implemented)."""
    pass
def splitRecording(filename):
    """Split <filename>.mp3 into per-track files at silence gaps using mp3splt.

    Output files are named by mp3splt (the rest of the pipeline globs for
    '<filename>_silence_*.mp3').
    """
    import subprocess
    # Passing an argument list (shell=False) avoids shell injection through
    # the user-supplied filename; the original interpolated it into an
    # os.system() shell string. Like os.system, a non-zero exit is ignored.
    subprocess.run(["mp3splt", "-s", "-p", "th=-30,min=3,rm",
                    "{}.mp3".format(filename)])
def getRecordingContext(filename):
    """Return the duration (whole seconds) of each split track, in track order.

    Matches files named '<filename>_silence_*.mp3' produced by splitRecording.
    """
    import re

    def natural_key(name):
        # Sort digit runs numerically so '..._10.mp3' comes after '..._2.mp3'.
        return [int(tok) if tok.isdigit() else tok
                for tok in re.split(r'(\d+)', name)]

    # os.listdir order is arbitrary; sort so durations line up with track
    # numbers when compared against a release tracklist.
    pattern = '{}_silence_*.mp3'.format(filename)
    tracks = sorted((f for f in os.listdir('.') if fnmatch.fnmatch(f, pattern)),
                    key=natural_key)
    recordingContext = []
    for file in tracks:
        audio = MP3(file)
        recordingContext.append(int(audio.info.length))
    return recordingContext
def editMetadata(filename, winningCandidate):
    """Tag each split track from the matched release and rename it.

    winningCandidate layout (see collectionToCollectionList):
    [date_added, artist, album title, year, genre,
     [track titles], [track durations]].
    """
    import re

    def natural_key(name):
        # Numeric-aware sort so track files pair with titles in order.
        return [int(tok) if tok.isdigit() else tok
                for tok in re.split(r'(\d+)', name)]

    # os.listdir order is arbitrary; sort so track N receives title N.
    pattern = '{}_silence_*.mp3'.format(filename)
    filesToEdit = sorted(
        (f for f in os.listdir('.') if fnmatch.fnmatch(f, pattern)),
        key=natural_key)
    for i, f in enumerate(filesToEdit):
        try:
            meta = EasyID3(f)
        except mutagen.id3.ID3NoHeaderError:
            # File has no ID3 header yet; create one before tagging.
            meta = mutagen.File(f, easy=True)
            meta.add_tags()
        meta["artist"] = winningCandidate[1]
        meta["album"] = winningCandidate[2]
        meta["date"] = str(winningCandidate[3])
        meta["genre"] = winningCandidate[4]
        meta["title"] = winningCandidate[5][i]
        # ID3 track numbers are 1-based; the original started at 0.
        meta["tracknumber"] = str(i + 1)
        meta.save()
        # NOTE(review): a '/' in the artist or title would break this rename —
        # consider sanitizing if Discogs titles can contain path separators.
        dst = "{} - {}.mp3".format(winningCandidate[1], winningCandidate[5][i])
        os.rename(f, dst)
def getDiscogsCollection():
    """Return the user's Discogs collection, newest additions first."""
    # Credentials come from the environment when set; the literal placeholders
    # are kept only as a backward-compatible fallback (they were hard-coded).
    d = discogs_client.Client(
        os.environ.get("DISCOGS_CLIENT_ID", "CLIENTIDSTRING"),
        user_token=os.environ.get("DISCOGS_USER_TOKEN", "USERTOKEN"))
    joey = d.identity()
    # Folder 0 is the user's "All" collection folder.
    return joey.collection_folders[0].releases.sort("added", "desc")
def collectionToCollectionList(collection):
    """Flatten a Discogs collection into plain Python lists.

    Each release becomes
    [date_added, artist, title, year, genre, [track titles], [durations]].
    Sleeps one second per release to stay within Discogs API rate limits.
    """
    print("downloading collection...")
    collectionlist = []
    for album in tqdm(collection):
        sleep(1)
        release = album.release
        tracks = release.tracklist
        entry = [
            album.data['date_added'],
            release.artists[0].name,
            release.title,
            release.year,
            release.genres[0],
            [track.title for track in tracks],
            [track.duration for track in tracks],
        ]
        collectionlist.append(entry)
    return collectionlist
def pickleCollection(collectionlist):
    """Serialize the collection list to ``collectionlist.pkl``.

    :param collectionlist: list of release records to persist.
    """
    print("pickling collection...")
    # ``with`` guarantees the file is flushed and closed even on error;
    # the original left the handle open.
    with open('collectionlist.pkl', 'wb') as output:
        pickle.dump(collectionlist, output)
def unpickleCollection(filename='collectionlist.pkl'):
    """Load and return a previously pickled collection list.

    :param filename: path to the pickle file.
    :return: the deserialized collection list.
    """
    # ``with`` closes the handle; the original leaked it.
    with open(filename, 'rb') as pkl_file:
        return pickle.load(pkl_file)
def discogsCollectionChanged(collection):
    """Return True if Discogs has additions newer than the local pickle.

    Compares the ``date_added`` of the most recent release in the live
    collection against the first record of the pickled snapshot.

    :param collection: live Discogs collection, sorted newest-first.
    """
    cached = unpickleCollection()
    latestDateFromPickle = parse(cached[0][0])
    latestDateFromDiscogs = parse(collection[0].data['date_added'])
    # Return the comparison directly rather than if/else True/False.
    return latestDateFromDiscogs != latestDateFromPickle
def getReleaseCandidates(collectionlist, recordingContext):
    """Find collection releases whose track lengths match the recording.

    :param collectionlist: release records whose seventh element is a
        list of "M:S" duration strings (empty strings are skipped).
    :param recordingContext: per-track durations of the recording, in
        whole seconds.
    :return: the release records that fuzzily match the recording.
    """
    candidates = []
    for album in collectionlist:
        # Convert the release's "M:S" duration strings to whole seconds.
        releaseContext = [
            int(mins) * 60 + int(secs)
            for mins, secs in (d.split(":") for d in album[6] if len(d) > 0)
        ]
        # At a minimum, the track counts must agree.
        if len(releaseContext) != len(recordingContext):
            continue
        # Sum per-track absolute differences; accept the release when
        # the tracks are, on average, within 7 seconds of each other
        # (fuzzy numeric like-ness).
        totalDistance = sum(
            abs(release_dur - rec_dur)
            for release_dur, rec_dur in zip(releaseContext, recordingContext)
        )
        if totalDistance < 7 * len(releaseContext):
            candidates.append(album)
    return candidates
if __name__ == "__main__":
    # Pipeline: split the recording, measure the tracks, match them
    # against the user's Discogs collection, then tag the files.
    splitRecording(args.filename)
    RC = getRecordingContext(args.filename)
    collection = getDiscogsCollection()
    if not os.path.isfile("collectionlist.pkl"):
        # NOTE(review): this pickles the live discogs_client collection
        # object, while the reader below expects the flattened list from
        # collectionToCollectionList() -- confirm which is intended.
        pickleCollection(collection)
    if discogsCollectionChanged(collection):
        # NOTE(review): saveNewCollectionAdditions() is not defined
        # anywhere in this file -- this branch raises NameError.
        saveNewCollectionAdditions()
    collectionlist = unpickleCollection()
    candidates = getReleaseCandidates(collectionlist, RC)
    # Only proceed when exactly one release matches unambiguously.
    if len(candidates) == 1:
        print candidates[0]  # Python 2 print statement
        # edit Id3 tags
        editMetadata(args.filename, candidates[0])
        # add to iTunes
        # initiate upload
"josephliechty@josephs-air.gateway.pace.com"
] | josephliechty@josephs-air.gateway.pace.com |
aba1b20ca910395e8e556c928a2bf6e5d53cdac8 | 2d8da5cacd21dd425688d67e1a92faa50aefc6bc | /excel-sheet-column-number.py | c90dd1c70703b45a9911aa35628d96708bba7730 | [] | no_license | stella-shen/Leetcode | 970857edb74ae3ccf4bcce0c40e972ab8bcc5348 | 16ad99a6511543f0286559c483206c43ed655ddd | refs/heads/master | 2021-01-19T02:48:49.918054 | 2018-11-29T10:36:43 | 2018-11-29T10:36:43 | 47,523,042 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 349 | py |
class Solution(object):
    def titleToNumber(self, s):
        """Convert an Excel column title (e.g. "AB") to its number.

        Treats the title as a base-26 numeral where 'A' = 1 ... 'Z' = 26,
        so "A" -> 1, "Z" -> 26, "AA" -> 27, "AB" -> 28.

        :type s: str
        :rtype: int
        """
        # Iterate the characters directly instead of the Python-2-only
        # ``xrange`` over indices.
        ret = 0
        for ch in s:
            ret = ret * 26 + (ord(ch) - ord('A') + 1)
        return ret
if __name__ == '__main__':
    # Quick manual check; print() with a single argument behaves the
    # same on Python 2 and 3, unlike the original print statement.
    sol = Solution()
    s = "AB"
    print(sol.titleToNumber(s))
| [
"szsxt818@gmail.com"
] | szsxt818@gmail.com |
751a74264a973fe1ab989c874cc4a9a039bd45e4 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_55/373.py | 65e1a8ea83c63527538a4d324820da9d12a0a74e | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,581 | py | '''
Created on May 9, 2010
@author: indra
'''
import sys, os
# Google Code Jam "Theme Park": a roller coaster runs R times a day with
# k seats; N groups queue, board in order, and re-queue after riding.
# Each rider pays 1 euro per ride.  Compute the day's total earnings,
# using cycle detection over the starting queue position so huge R is
# handled without simulating every ride.
filename = "C-large"
path = os.path.normpath(os.path.join(os.path.dirname(__file__), filename+".in"))
reader = open(path, "rb")
path = os.path.normpath(os.path.join(os.path.dirname(__file__), filename+".out"))
writer = open(path,"w")
ncases = int(reader.readline().rstrip())
caseno = 0
while caseno<ncases:
    caseno+=1
    case = reader.readline().rstrip()
    R,k,N = [int(x) for x in case.split(' ')]
    case = reader.readline().rstrip()
    gps = [int(x) for x in case.split(' ')]
    # totp: total people across all groups.
    totp = 0
    for gp in gps:
        totp+=gp
    print (R,k,N)
    print gps
    print totp
    # If every group fits in one train, each of the R rides earns totp.
    if totp<=k:
        writer.write("Case #%s: %d\n" % (str(caseno),R*totp))
        continue
    # rides[i]  -> ride index at which position i first headed the queue
    #              (-1 = not yet seen); money[i] -> earnings up to then.
    rides = [-1]*N
    money = [0]*N
    retmon = 0
    curloc = 0
    curride = 0
    curmon = 0
    # Simulate rides until a starting position repeats (cycle found) or
    # R rides have run.
    while rides[curloc]==-1 and curride<R:
        rides[curloc] = curride
        money[curloc] = curmon
        curride+=1
        # Board whole groups while they fit; totp > k guarantees this
        # inner loop terminates before wrapping a full cycle.
        tem=0
        while tem+gps[curloc]<=k:
            tem+=gps[curloc]
            curloc+=1
            if curloc>=N:
                curloc-=N
        curmon+=tem
    # All R rides simulated before any cycle appeared.
    if curride==R:
        writer.write("Case #%s: %d\n" % (str(caseno),curmon))
        continue
    # A cycle was found: it spans cycrides rides earning cycmoney.
    cycrides = curride - rides[curloc]
    cycmoney = curmon - money[curloc]
    # Account for the pre-cycle prefix, then multiply out whole cycles.
    R-=rides[curloc]
    retmon+=money[curloc]
    rleft = R%cycrides
    retmon += cycmoney*((R-rleft)/cycrides)
    # Simulate the remaining partial cycle directly.
    lastrides = 0
    while lastrides<rleft:
        lastrides+=1
        tem=0
        while tem+gps[curloc]<=k:
            tem+=gps[curloc]
            curloc+=1
            if curloc>=N:
                curloc-=N
        retmon+=tem
    writer.write("Case #%s: %d\n" % (str(caseno),retmon))
writer.close()
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
f183c720412c131b71409791d712d87142101b8b | 6e8d58340f2be5f00d55e2629052c0bbc9dcf390 | /eggs/SQLAlchemy-0.5.6_dev_r6498-py2.6.egg/sqlalchemy/databases/mysql.py | ba6b026ea29aac857be41bbe8563e904dfc2ff43 | [
"CC-BY-2.5",
"MIT"
] | permissive | JCVI-Cloud/galaxy-tools-prok | e57389750d33ac766e1658838cdb0aaf9a59c106 | 3c44ecaf4b2e1f2d7269eabef19cbd2e88b3a99c | refs/heads/master | 2021-05-02T06:23:05.414371 | 2014-03-21T18:12:43 | 2014-03-21T18:12:43 | 6,092,693 | 0 | 2 | NOASSERTION | 2020-07-25T20:38:17 | 2012-10-05T15:57:38 | Python | UTF-8 | Python | false | false | 97,080 | py | # -*- fill-column: 78 -*-
# mysql.py
# Copyright (C) 2005, 2006, 2007, 2008, 2009 Michael Bayer mike_mp@zzzcomputing.com
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Support for the MySQL database.
Overview
--------
For normal SQLAlchemy usage, importing this module is unnecessary. It will be
loaded on-demand when a MySQL connection is needed. The generic column types
like :class:`~sqlalchemy.String` and :class:`~sqlalchemy.Integer` will
automatically be adapted to the optimal matching MySQL column type.
But if you would like to use one of the MySQL-specific or enhanced column
types when creating tables with your :class:`~sqlalchemy.Table` definitions,
then you will need to import them from this module::
from sqlalchemy.databases import mysql
Table('mytable', metadata,
Column('id', Integer, primary_key=True),
Column('ittybittyblob', mysql.MSTinyBlob),
Column('biggy', mysql.MSBigInteger(unsigned=True)))
All standard MySQL column types are supported. The OpenGIS types are
available for use via table reflection but have no special support or mapping
to Python classes. If you're using these types and have opinions about how
OpenGIS can be smartly integrated into SQLAlchemy please join the mailing
list!
Supported Versions and Features
-------------------------------
SQLAlchemy supports 6 major MySQL versions: 3.23, 4.0, 4.1, 5.0, 5.1 and 6.0,
with capabilities increasing with more modern servers.
Versions 4.1 and higher support the basic SQL functionality that SQLAlchemy
uses in the ORM and SQL expressions. These versions pass the applicable tests
in the suite 100%. No heroic measures are taken to work around major missing
SQL features- if your server version does not support sub-selects, for
example, they won't work in SQLAlchemy either.
Currently, the only DB-API driver supported is `MySQL-Python` (also referred to
as `MySQLdb`). Either 1.2.1 or 1.2.2 are recommended. The alpha, beta and
gamma releases of 1.2.1 and 1.2.2 should be avoided. Support for Jython and
IronPython is planned.
===================================== ===============
Feature Minimum Version
===================================== ===============
sqlalchemy.orm 4.1.1
Table Reflection 3.23.x
DDL Generation 4.1.1
utf8/Full Unicode Connections 4.1.1
Transactions 3.23.15
Two-Phase Transactions 5.0.3
Nested Transactions 5.0.3
===================================== ===============
See the official MySQL documentation for detailed information about features
supported in any given server release.
Character Sets
--------------
Many MySQL server installations default to a ``latin1`` encoding for client
connections. All data sent through the connection will be converted into
``latin1``, even if you have ``utf8`` or another character set on your tables
and columns. With versions 4.1 and higher, you can change the connection
character set either through server configuration or by including the
``charset`` parameter in the URL used for ``create_engine``. The ``charset``
option is passed through to MySQL-Python and has the side-effect of also
enabling ``use_unicode`` in the driver by default. For regular encoded
strings, also pass ``use_unicode=0`` in the connection arguments::
# set client encoding to utf8; all strings come back as unicode
create_engine('mysql:///mydb?charset=utf8')
# set client encoding to utf8; all strings come back as utf8 str
create_engine('mysql:///mydb?charset=utf8&use_unicode=0')
Storage Engines
---------------
Most MySQL server installations have a default table type of ``MyISAM``, a
non-transactional table type. During a transaction, non-transactional storage
engines do not participate and continue to store table changes in autocommit
mode. For fully atomic transactions, all participating tables must use a
transactional engine such as ``InnoDB``, ``Falcon``, ``SolidDB``, `PBXT`, etc.
Storage engines can be elected when creating tables in SQLAlchemy by supplying
a ``mysql_engine='whatever'`` to the ``Table`` constructor. Any MySQL table
creation option can be specified in this syntax::
Table('mytable', metadata,
Column('data', String(32)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
Keys
----
Not all MySQL storage engines support foreign keys. For ``MyISAM`` and
similar engines, the information loaded by table reflection will not include
foreign keys. For these tables, you may supply a
:class:`~sqlalchemy.ForeignKeyConstraint` at reflection time::
Table('mytable', metadata,
ForeignKeyConstraint(['other_id'], ['othertable.other_id']),
autoload=True
)
When creating tables, SQLAlchemy will automatically set ``AUTO_INCREMENT`` on
an integer primary key column::
>>> t = Table('mytable', metadata,
... Column('mytable_id', Integer, primary_key=True)
... )
>>> t.create()
CREATE TABLE mytable (
id INTEGER NOT NULL AUTO_INCREMENT,
PRIMARY KEY (id)
)
You can disable this behavior by supplying ``autoincrement=False`` to the
:class:`~sqlalchemy.Column`. This flag can also be used to enable
auto-increment on a secondary column in a multi-column key for some storage
engines::
Table('mytable', metadata,
Column('gid', Integer, primary_key=True, autoincrement=False),
Column('id', Integer, primary_key=True)
)
SQL Mode
--------
MySQL SQL modes are supported. Modes that enable ``ANSI_QUOTES`` (such as
``ANSI``) require an engine option to modify SQLAlchemy's quoting style.
When using an ANSI-quoting mode, supply ``use_ansiquotes=True`` when
creating your ``Engine``::
create_engine('mysql://localhost/test', use_ansiquotes=True)
This is an engine-wide option and is not toggleable on a per-connection basis.
SQLAlchemy does not presume to ``SET sql_mode`` for you with this option. For
the best performance, set the quoting style server-wide in ``my.cnf`` or by
supplying ``--sql-mode`` to ``mysqld``. You can also use a
:class:`sqlalchemy.pool.Pool` listener hook to issue a ``SET SESSION
sql_mode='...'`` on connect to configure each connection.
If you do not specify ``use_ansiquotes``, the regular MySQL quoting style is
used by default.
If you do issue a ``SET sql_mode`` through SQLAlchemy, the dialect must be
updated if the quoting style is changed. Again, this change will affect all
connections::
connection.execute('SET sql_mode="ansi"')
connection.dialect.use_ansiquotes = True
MySQL SQL Extensions
--------------------
Many of the MySQL SQL extensions are handled through SQLAlchemy's generic
function and operator support::
table.select(table.c.password==func.md5('plaintext'))
table.select(table.c.username.op('regexp')('^[a-d]'))
And of course any valid MySQL statement can be executed as a string as well.
Some limited direct support for MySQL extensions to SQL is currently
available.
* SELECT pragma::
select(..., prefixes=['HIGH_PRIORITY', 'SQL_SMALL_RESULT'])
* UPDATE with LIMIT::
update(..., mysql_limit=10)
Troubleshooting
---------------
If you have problems that seem server related, first check that you are
using the most recent stable MySQL-Python package available. The Database
Notes page on the wiki at http://www.sqlalchemy.org is a good resource for
timely information affecting MySQL in SQLAlchemy.
"""
import datetime, decimal, inspect, re, sys
from array import array as _array
from sqlalchemy import exc, log, schema, sql, util
from sqlalchemy.sql import operators as sql_operators
from sqlalchemy.sql import functions as sql_functions
from sqlalchemy.sql import compiler
from sqlalchemy.engine import base as engine_base, default
from sqlalchemy import types as sqltypes
# Names exported by ``from ... import *``: the MySQL column types.
__all__ = (
    'MSBigInteger', 'MSMediumInteger', 'MSBinary', 'MSBit', 'MSBlob', 'MSBoolean',
    'MSChar', 'MSDate', 'MSDateTime', 'MSDecimal', 'MSDouble',
    'MSEnum', 'MSFloat', 'MSInteger', 'MSLongBlob', 'MSLongText',
    'MSMediumBlob', 'MSMediumText', 'MSNChar', 'MSNVarChar',
    'MSNumeric', 'MSSet', 'MSSmallInteger', 'MSString', 'MSText',
    'MSTime', 'MSTimeStamp', 'MSTinyBlob', 'MSTinyInteger',
    'MSTinyText', 'MSVarBinary', 'MSYear' )

# Identifiers that must be quoted in DDL/SQL, aggregated across MySQL
# versions (version-specific additions are noted inline below).
RESERVED_WORDS = set(
    ['accessible', 'add', 'all', 'alter', 'analyze','and', 'as', 'asc',
     'asensitive', 'before', 'between', 'bigint', 'binary', 'blob', 'both',
     'by', 'call', 'cascade', 'case', 'change', 'char', 'character', 'check',
     'collate', 'column', 'condition', 'constraint', 'continue', 'convert',
     'create', 'cross', 'current_date', 'current_time', 'current_timestamp',
     'current_user', 'cursor', 'database', 'databases', 'day_hour',
     'day_microsecond', 'day_minute', 'day_second', 'dec', 'decimal',
     'declare', 'default', 'delayed', 'delete', 'desc', 'describe',
     'deterministic', 'distinct', 'distinctrow', 'div', 'double', 'drop',
     'dual', 'each', 'else', 'elseif', 'enclosed', 'escaped', 'exists',
     'exit', 'explain', 'false', 'fetch', 'float', 'float4', 'float8',
     'for', 'force', 'foreign', 'from', 'fulltext', 'grant', 'group', 'having',
     'high_priority', 'hour_microsecond', 'hour_minute', 'hour_second', 'if',
     'ignore', 'in', 'index', 'infile', 'inner', 'inout', 'insensitive',
     'insert', 'int', 'int1', 'int2', 'int3', 'int4', 'int8', 'integer',
     'interval', 'into', 'is', 'iterate', 'join', 'key', 'keys', 'kill',
     'leading', 'leave', 'left', 'like', 'limit', 'linear', 'lines', 'load',
     'localtime', 'localtimestamp', 'lock', 'long', 'longblob', 'longtext',
     'loop', 'low_priority', 'master_ssl_verify_server_cert', 'match',
     'mediumblob', 'mediumint', 'mediumtext', 'middleint',
     'minute_microsecond', 'minute_second', 'mod', 'modifies', 'natural',
     'not', 'no_write_to_binlog', 'null', 'numeric', 'on', 'optimize',
     'option', 'optionally', 'or', 'order', 'out', 'outer', 'outfile',
     'precision', 'primary', 'procedure', 'purge', 'range', 'read', 'reads',
     'read_only', 'read_write', 'real', 'references', 'regexp', 'release',
     'rename', 'repeat', 'replace', 'require', 'restrict', 'return',
     'revoke', 'right', 'rlike', 'schema', 'schemas', 'second_microsecond',
     'select', 'sensitive', 'separator', 'set', 'show', 'smallint', 'spatial',
     'specific', 'sql', 'sqlexception', 'sqlstate', 'sqlwarning',
     'sql_big_result', 'sql_calc_found_rows', 'sql_small_result', 'ssl',
     'starting', 'straight_join', 'table', 'terminated', 'then', 'tinyblob',
     'tinyint', 'tinytext', 'to', 'trailing', 'trigger', 'true', 'undo',
     'union', 'unique', 'unlock', 'unsigned', 'update', 'usage', 'use',
     'using', 'utc_date', 'utc_time', 'utc_timestamp', 'values', 'varbinary',
     'varchar', 'varcharacter', 'varying', 'when', 'where', 'while', 'with',

     'write', 'x509', 'xor', 'year_month', 'zerofill', # 5.0
     'columns', 'fields', 'privileges', 'soname', 'tables', # 4.1

     'accessible', 'linear', 'master_ssl_verify_server_cert', 'range',
     'read_only', 'read_write', # 5.1

     ])

# Statements whose execution implies data/schema modification and so
# requires an autocommit under non-transactional usage.
AUTOCOMMIT_RE = re.compile(
    r'\s*(?:UPDATE|INSERT|CREATE|DELETE|DROP|ALTER|LOAD +DATA|REPLACE)',
    re.I | re.UNICODE)
# SET statements (optionally GLOBAL/SESSION scoped).
SET_RE = re.compile(
    r'\s*SET\s+(?:(?:GLOBAL|SESSION)\s+)?\w',
    re.I | re.UNICODE)
class _NumericType(object):
"""Base for MySQL numeric types."""
def __init__(self, kw):
self.unsigned = kw.pop('unsigned', False)
self.zerofill = kw.pop('zerofill', False)
def _extend(self, spec):
"Extend a numeric-type declaration with MySQL specific extensions."
if self.unsigned:
spec += ' UNSIGNED'
if self.zerofill:
spec += ' ZEROFILL'
return spec
class _StringType(object):
"""Base for MySQL string types."""
def __init__(self, charset=None, collation=None,
ascii=False, unicode=False, binary=False,
national=False, **kwargs):
self.charset = charset
# allow collate= or collation=
self.collation = kwargs.get('collate', collation)
self.ascii = ascii
self.unicode = unicode
self.binary = binary
self.national = national
def _extend(self, spec):
"""Extend a string-type declaration with standard SQL CHARACTER SET /
COLLATE annotations and MySQL specific extensions.
"""
if self.charset:
charset = 'CHARACTER SET %s' % self.charset
elif self.ascii:
charset = 'ASCII'
elif self.unicode:
charset = 'UNICODE'
else:
charset = None
if self.collation:
collation = 'COLLATE %s' % self.collation
elif self.binary:
collation = 'BINARY'
else:
collation = None
if self.national:
# NATIONAL (aka NCHAR/NVARCHAR) trumps charsets.
return ' '.join([c for c in ('NATIONAL', spec, collation)
if c is not None])
return ' '.join([c for c in (spec, charset, collation)
if c is not None])
def __repr__(self):
attributes = inspect.getargspec(self.__init__)[0][1:]
attributes.extend(inspect.getargspec(_StringType.__init__)[0][1:])
params = {}
for attr in attributes:
val = getattr(self, attr)
if val is not None and val is not False:
params[attr] = val
return "%s(%s)" % (self.__class__.__name__,
', '.join(['%s=%r' % (k, params[k]) for k in params]))
class MSNumeric(sqltypes.Numeric, _NumericType):
    """MySQL NUMERIC type."""

    def __init__(self, precision=10, scale=2, asdecimal=True, **kw):
        """Construct a NUMERIC.

        :param precision: total digits stored; server limits apply when
          both precision and scale are None.
        :param scale: digits to the right of the decimal point.
        :param unsigned: optional boolean, disallow negative values.
        :param zerofill: optional; store values as zero-left-padded
          strings.  Does not change the numeric values the underlying
          DB-API returns.
        """
        _NumericType.__init__(self, kw)
        sqltypes.Numeric.__init__(self, precision, scale, asdecimal=asdecimal, **kw)

    def get_col_spec(self):
        if self.precision is None:
            return self._extend("NUMERIC")
        return self._extend("NUMERIC(%s, %s)" % (self.precision, self.scale))

    def bind_processor(self, dialect):
        # Decimal values are accepted by the driver as-is.
        return None

    def result_processor(self, dialect):
        if self.asdecimal:
            # Decimal is the native return type; pass rows through.
            return None

        def process(value):
            if isinstance(value, decimal.Decimal):
                return float(value)
            return value
        return process
class MSDecimal(MSNumeric):
    """MySQL DECIMAL type."""

    def __init__(self, precision=10, scale=2, asdecimal=True, **kw):
        """Construct a DECIMAL.

        :param precision: total digits stored; server limits apply when
          both precision and scale are None.
        :param scale: digits to the right of the decimal point.
        :param unsigned: optional boolean, disallow negative values.
        :param zerofill: optional; store values as zero-left-padded
          strings.
        """
        super(MSDecimal, self).__init__(precision, scale, asdecimal=asdecimal, **kw)

    def get_col_spec(self):
        if self.precision is None:
            spec = "DECIMAL"
        elif self.scale is None:
            spec = "DECIMAL(%s)" % (self.precision,)
        else:
            spec = "DECIMAL(%s, %s)" % (self.precision, self.scale)
        return self._extend(spec)
class MSDouble(sqltypes.Float, _NumericType):
    """MySQL DOUBLE type."""

    def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
        """Construct a DOUBLE.

        :param precision: total digits stored; must be supplied together
          with ``scale`` or both omitted.
        :param scale: digits to the right of the decimal point.
        :param unsigned: optional boolean, disallow negative values.
        :param zerofill: optional; store values as zero-left-padded
          strings.
        """
        both_none = precision is None and scale is None
        both_set = precision is not None and scale is not None
        if not (both_none or both_set):
            raise exc.ArgumentError(
                "You must specify both precision and scale or omit "
                "both altogether.")
        _NumericType.__init__(self, kw)
        sqltypes.Float.__init__(self, asdecimal=asdecimal, **kw)
        self.scale = scale
        self.precision = precision

    def get_col_spec(self):
        if self.precision is None or self.scale is None:
            return self._extend('DOUBLE')
        return self._extend("DOUBLE(%s, %s)" % (self.precision, self.scale))
class MSReal(MSDouble):
    """MySQL REAL type."""

    def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
        """Construct a REAL.

        Same arguments as MSDouble: precision and scale must be supplied
        together or both omitted; ``unsigned`` and ``zerofill`` are
        accepted as keyword options.
        """
        MSDouble.__init__(self, precision, scale, asdecimal, **kw)

    def get_col_spec(self):
        if self.precision is None or self.scale is None:
            return self._extend('REAL')
        return self._extend("REAL(%s, %s)" % (self.precision, self.scale))
class MSFloat(sqltypes.Float, _NumericType):
    """MySQL FLOAT type."""

    def __init__(self, precision=None, scale=None, asdecimal=False, **kw):
        """Construct a FLOAT.

        :param precision: total digits stored; server limits apply when
          both precision and scale are None.
        :param scale: digits to the right of the decimal point.
        :param unsigned: optional boolean, disallow negative values.
        :param zerofill: optional; store values as zero-left-padded
          strings.
        """
        _NumericType.__init__(self, kw)
        sqltypes.Float.__init__(self, asdecimal=asdecimal, **kw)
        self.scale = scale
        self.precision = precision

    def get_col_spec(self):
        # Scale is only emitted when precision is also present.
        if self.precision is not None:
            if self.scale is not None:
                return self._extend("FLOAT(%s, %s)" % (self.precision, self.scale))
            return self._extend("FLOAT(%s)" % (self.precision,))
        return self._extend("FLOAT")

    def bind_processor(self, dialect):
        # Python floats are accepted by the driver as-is.
        return None
class MSInteger(sqltypes.Integer, _NumericType):
    """MySQL INTEGER type."""

    def __init__(self, display_width=None, **kw):
        """Construct an INTEGER.

        :param display_width: optional maximum display width.
        :param unsigned: optional boolean, disallow negative values.
        :param zerofill: optional; store values as zero-left-padded
          strings.
        """
        if 'length' in kw:
            # Historical alias for display_width.
            util.warn_deprecated("'length' is deprecated for MSInteger and subclasses. Use 'display_width'.")
            self.display_width = kw.pop('length')
        else:
            self.display_width = display_width
        _NumericType.__init__(self, kw)
        sqltypes.Integer.__init__(self, **kw)

    def get_col_spec(self):
        if self.display_width is None:
            return self._extend("INTEGER")
        return self._extend("INTEGER(%s)" % (self.display_width,))
class MSBigInteger(MSInteger):
    """MySQL BIGINTEGER type."""

    def __init__(self, display_width=None, **kw):
        """Construct a BIGINTEGER.

        :param display_width: optional maximum display width.
        :param unsigned: optional boolean, disallow negative values.
        :param zerofill: optional; store values as zero-left-padded
          strings.
        """
        super(MSBigInteger, self).__init__(display_width, **kw)

    def get_col_spec(self):
        if self.display_width is None:
            return self._extend("BIGINT")
        return self._extend("BIGINT(%s)" % (self.display_width,))
class MSMediumInteger(MSInteger):
    """MySQL MEDIUMINTEGER type."""

    def __init__(self, display_width=None, **kw):
        """Construct a MEDIUMINTEGER.

        :param display_width: optional maximum display width.
        :param unsigned: optional boolean, disallow negative values.
        :param zerofill: optional; store values as zero-left-padded
          strings.
        """
        super(MSMediumInteger, self).__init__(display_width, **kw)

    def get_col_spec(self):
        if self.display_width is None:
            return self._extend("MEDIUMINT")
        return self._extend("MEDIUMINT(%s)" % (self.display_width,))
class MSTinyInteger(MSInteger):
    """MySQL TINYINT type.

    Following the usual MySQL conventions, TINYINT(1) columns reflected
    during Table(..., autoload=True) are treated as Boolean columns.
    """

    def __init__(self, display_width=None, **kw):
        """Construct a TINYINT.

        :param display_width: optional maximum display width.
        :param unsigned: optional boolean, disallow negative values.
        :param zerofill: optional; store values as zero-left-padded
          strings.
        """
        super(MSTinyInteger, self).__init__(display_width, **kw)

    def get_col_spec(self):
        if self.display_width is None:
            return self._extend("TINYINT")
        return self._extend("TINYINT(%s)" % self.display_width)
class MSSmallInteger(sqltypes.Smallinteger, MSInteger):
    """MySQL SMALLINTEGER type."""

    def __init__(self, display_width=None, **kw):
        """Construct a SMALLINTEGER.

        :param display_width: optional maximum display width.
        :param unsigned: optional boolean, disallow negative values.
        :param zerofill: optional; store values as zero-left-padded
          strings.
        """
        self.display_width = display_width
        _NumericType.__init__(self, kw)
        sqltypes.SmallInteger.__init__(self, **kw)

    def get_col_spec(self):
        if self.display_width is None:
            return self._extend("SMALLINT")
        return self._extend("SMALLINT(%s)" % (self.display_width,))
class MSBit(sqltypes.TypeEngine):
    """MySQL BIT type.

    This type is for MySQL 5.0.3 or greater for MyISAM, and 5.0.5 or
    greater for MyISAM, MEMORY, InnoDB and BDB.  For older versions, use
    a MSTinyInteger() type.
    """

    def __init__(self, length=None):
        """Construct a BIT.

        :param length: Optional, number of bits.
        """
        self.length = length

    def result_processor(self, dialect):
        """Return a converter folding MySQL's variable-length (up to 64
        bit) binary string into an integer."""
        def process(value):
            if value is not None:
                # Accumulate the big-endian bytes into an integer.
                # ``0`` replaces the Python-2-only ``0L`` literal; ints
                # auto-promote to arbitrary precision in both versions,
                # so the computed value is unchanged.
                v = 0
                for i in map(ord, value):
                    v = v << 8 | i
                value = v
            return value
        return process

    def get_col_spec(self):
        if self.length is not None:
            return "BIT(%s)" % self.length
        else:
            return "BIT"
class MSDateTime(sqltypes.DateTime):
    """MySQL DATETIME type."""

    def get_col_spec(self):
        # Combined date-and-time column.
        return "DATETIME"
class MSDate(sqltypes.Date):
    """MySQL DATE type."""

    def get_col_spec(self):
        # Calendar-date-only column.
        return "DATE"
class MSTime(sqltypes.Time):
    """MySQL TIME type."""

    def get_col_spec(self):
        return "TIME"

    def result_processor(self, dialect):
        """Return a converter turning the driver's timedelta values into
        datetime.time objects."""
        def process(value):
            # MySQLdb returns TIME columns as timedelta; convert the
            # second count to a wall-clock time.  divmod keeps the
            # arithmetic integral on both Python 2 and 3, whereas the
            # original ``/`` expressions relied on Python-2 integer
            # division.
            if value is None:
                return None
            hours, remainder = divmod(value.seconds, 3600)
            minutes, seconds = divmod(remainder, 60)
            return datetime.time(hours, minutes, seconds)
        return process
class MSTimeStamp(sqltypes.TIMESTAMP):
    """MySQL TIMESTAMP type.

    To signal the orm to automatically re-select modified rows to retrieve the
    updated timestamp, add a ``server_default`` to your
    :class:`~sqlalchemy.Column` specification::

        from sqlalchemy.databases import mysql
        Column('updated', mysql.MSTimeStamp,
               server_default=sql.text('CURRENT_TIMESTAMP')
              )

    The full range of MySQL 4.1+ TIMESTAMP defaults can be specified in
    the default::

        server_default=sql.text('CURRENT TIMESTAMP ON UPDATE CURRENT_TIMESTAMP')

    """

    def get_col_spec(self):
        return "TIMESTAMP"
class MSYear(sqltypes.TypeEngine):
    """MySQL YEAR type, for single byte storage of years 1901-2155."""

    def __init__(self, display_width=None):
        # Optional display width, e.g. YEAR(4).
        self.display_width = display_width

    def get_col_spec(self):
        if self.display_width is not None:
            return "YEAR(%s)" % self.display_width
        return "YEAR"
class MSText(_StringType, sqltypes.Text):
    """MySQL TEXT type, for text up to 2^16 characters."""

    def __init__(self, length=None, **kwargs):
        """Construct a TEXT.

        :param length: optional; lets the server substitute the smallest
          TEXT variant able to hold ``length`` characters.
        :param charset: optional column-level character set; overrides
          the 'ascii'/'unicode' shorthands.
        :param collation: optional column-level collation; overrides the
          'binary' shorthand.
        :param ascii: shorthand for the ``latin1`` character set.
        :param unicode: shorthand for the ``ucs2`` character set.
        :param national: use the server's configured national character
          set.
        :param binary: use the binary collation of the column's
          character set; affects collation only, not stored data.
        """
        _StringType.__init__(self, **kwargs)
        sqltypes.Text.__init__(
            self, length,
            kwargs.get('convert_unicode', False),
            kwargs.get('assert_unicode', None))

    def get_col_spec(self):
        if self.length:
            return self._extend("TEXT(%d)" % self.length)
        return self._extend("TEXT")
class MSTinyText(MSText):
    """MySQL TINYTEXT type, for text up to 2^8 characters."""

    def __init__(self, **kwargs):
        """Construct a TINYTEXT.

        Accepts the same charset/collation keyword options as MSText
        (charset, collation, ascii, unicode, national, binary).
        """
        super(MSTinyText, self).__init__(**kwargs)

    def get_col_spec(self):
        return self._extend("TINYTEXT")
class MSMediumText(MSText):
    """MySQL MEDIUMTEXT type, for text up to 2^24 characters."""

    def __init__(self, **kwargs):
        """Construct a MEDIUMTEXT.

        Accepts the same charset/collation keyword options as MSText
        (charset, collation, ascii, unicode, national, binary).
        """
        super(MSMediumText, self).__init__(**kwargs)

    def get_col_spec(self):
        return self._extend("MEDIUMTEXT")
class MSLongText(MSText):
    """MySQL LONGTEXT type, for text up to 2^32 characters."""

    def __init__(self, **kwargs):
        """Construct a LONGTEXT.

        :param charset: optional column-level character set; overrides the
          'ascii'/'unicode' short-hands.
        :param collation: optional column-level collation; overrides the
          'binary' short-hand.
        :param ascii: short-hand for the ``latin1`` character set; renders
          ASCII in DDL.  Defaults to False.
        :param unicode: short-hand for the ``ucs2`` character set; renders
          UNICODE in DDL.  Defaults to False.
        :param national: when true, use the server's configured national
          character set.
        :param binary: pick the binary collation matching the column's
          character set; renders BINARY in DDL.  Affects collation only,
          not the stored data.  Defaults to False.
        """
        MSText.__init__(self, **kwargs)

    def get_col_spec(self):
        """Return LONGTEXT DDL (this type never takes a length)."""
        return self._extend("LONGTEXT")
class MSString(_StringType, sqltypes.String):
    """MySQL VARCHAR type, for variable-length character data."""

    def __init__(self, length=None, **kwargs):
        """Construct a VARCHAR.

        :param length: maximum data length, in characters; when omitted the
          DDL renders an unbounded VARCHAR.
        :param charset: optional column-level character set; overrides the
          'ascii'/'unicode' short-hands.
        :param collation: optional column-level collation; overrides the
          'binary' short-hand.
        :param ascii: short-hand for the ``latin1`` character set; renders
          ASCII in DDL.  Defaults to False.
        :param unicode: short-hand for the ``ucs2`` character set; renders
          UNICODE in DDL.  Defaults to False.
        :param national: when true, use the server's configured national
          character set.
        :param binary: pick the binary collation matching the column's
          character set; renders BINARY in DDL.  Affects collation only,
          not the stored data.  Defaults to False.
        """
        _StringType.__init__(self, **kwargs)
        sqltypes.String.__init__(
            self, length,
            kwargs.get('convert_unicode', False),
            kwargs.get('assert_unicode', None))

    def get_col_spec(self):
        """Return VARCHAR DDL, with the length when one was given."""
        if not self.length:
            return self._extend("VARCHAR")
        return self._extend("VARCHAR(%d)" % self.length)
class MSChar(_StringType, sqltypes.CHAR):
    """MySQL CHAR type, for fixed-length character data."""

    def __init__(self, length, **kwargs):
        """Construct a CHAR.

        (Docstring fix: this previously read "Construct an NCHAR" — a
        copy-paste from MSNChar; for the national variant use MSNChar.)

        :param length: Maximum data length, in characters.

        :param binary: Optional, use the default binary collation for the
          column's character set.  This does not affect the type of data
          stored, use a BINARY type for binary data.

        :param collation: Optional, request a particular collation.  Must be
          compatible with the column's character set.
        """
        _StringType.__init__(self, **kwargs)
        sqltypes.CHAR.__init__(self, length,
                               kwargs.get('convert_unicode', False))

    def get_col_spec(self):
        """Return CHAR(length) DDL."""
        return self._extend("CHAR(%(length)s)" % {'length': self.length})
class MSNVarChar(_StringType, sqltypes.String):
    """MySQL NVARCHAR type.

    For variable-length character data in the server's configured national
    character set.
    """

    def __init__(self, length=None, **kwargs):
        """Construct an NVARCHAR.

        :param length: Maximum data length, in characters.

        :param binary: Optional, use the default binary collation for the
          national character set.  This does not affect the type of data
          stored, use a BINARY type for binary data.

        :param collation: Optional, request a particular collation.  Must be
          compatible with the national character set.
        """
        # Force the NATIONAL attribute regardless of caller input.
        kwargs['national'] = True
        _StringType.__init__(self, **kwargs)
        sqltypes.String.__init__(self, length,
                                 kwargs.get('convert_unicode', False))

    def get_col_spec(self):
        # We'll actually generate the equiv. "NATIONAL VARCHAR" instead
        # of "NVARCHAR".
        return self._extend("VARCHAR(%(length)s)" % {'length': self.length})
class MSNChar(_StringType, sqltypes.CHAR):
    """MySQL NCHAR type.

    For fixed-length character data in the server's configured national
    character set.
    """

    def __init__(self, length=None, **kwargs):
        """Construct an NCHAR.

        :param length: Maximum data length, in characters.

        :param binary: Optional, use the default binary collation for the
          national character set.  This does not affect the type of data
          stored, use a BINARY type for binary data.

        :param collation: Optional, request a particular collation.  Must be
          compatible with the national character set.
        """
        # Force the NATIONAL attribute regardless of caller input.
        kwargs['national'] = True
        _StringType.__init__(self, **kwargs)
        sqltypes.CHAR.__init__(self, length,
                               kwargs.get('convert_unicode', False))

    def get_col_spec(self):
        # We'll actually generate the equiv. "NATIONAL CHAR" instead of "NCHAR".
        return self._extend("CHAR(%(length)s)" % {'length': self.length})
class _BinaryType(sqltypes.Binary):
    """Base for MySQL binary types."""

    def get_col_spec(self):
        """Return BLOB DDL, with the optional length."""
        if not self.length:
            return "BLOB"
        return "BLOB(%d)" % self.length

    def result_processor(self, dialect):
        # Normalize driver return values to buffer objects; NULL passes
        # through untouched.
        def process(value):
            if value is None:
                return None
            return util.buffer(value)
        return process
class MSVarBinary(_BinaryType):
    """MySQL VARBINARY type, for variable length binary data."""

    def __init__(self, length=None, **kw):
        """Construct a VARBINARY.

        :param length: Maximum data length, in characters.  When omitted,
          DDL falls back to BLOB.
        """
        super(MSVarBinary, self).__init__(length, **kw)

    def get_col_spec(self):
        """Return VARBINARY(length) DDL, or BLOB when no length was given."""
        if not self.length:
            return "BLOB"
        return "VARBINARY(%d)" % self.length
class MSBinary(_BinaryType):
    """MySQL BINARY type, for fixed length binary data."""

    def __init__(self, length=None, **kw):
        """Construct a BINARY.

        This is a fixed length type, and short values will be right-padded
        with a server-version-specific pad value.

        :param length: Maximum data length, in bytes.  If length is not
          specified, this will generate a BLOB.  This usage is deprecated.
        """
        super(MSBinary, self).__init__(length, **kw)

    def get_col_spec(self):
        """Return BINARY(length) DDL, or BLOB when no length was given."""
        if self.length:
            return "BINARY(%d)" % self.length
        else:
            return "BLOB"

    # result_processor is inherited from _BinaryType; a byte-identical
    # copy previously duplicated here was removed as dead code.
class MSBlob(_BinaryType):
    """MySQL BLOB type, for binary data up to 2^16 bytes."""

    def __init__(self, length=None, **kw):
        """Construct a BLOB.

        :param length: Optional, if provided the server may optimize storage
          by substituting the smallest TEXT type sufficient to store
          ``length`` characters.
        """
        super(MSBlob, self).__init__(length, **kw)

    def get_col_spec(self):
        """Return BLOB DDL, with the optional length."""
        if self.length:
            return "BLOB(%d)" % self.length
        else:
            return "BLOB"

    # result_processor is inherited from _BinaryType; a byte-identical
    # copy previously duplicated here was removed as dead code.

    def __repr__(self):
        return "%s()" % self.__class__.__name__
class MSTinyBlob(MSBlob):
    """MySQL TINYBLOB type, for binary data up to 2^8 bytes."""

    def get_col_spec(self):
        # Fixed-name type; never takes a length.
        return "TINYBLOB"
class MSMediumBlob(MSBlob):
    """MySQL MEDIUMBLOB type, for binary data up to 2^24 bytes."""

    def get_col_spec(self):
        # Fixed-name type; never takes a length.
        return "MEDIUMBLOB"
class MSLongBlob(MSBlob):
    """MySQL LONGBLOB type, for binary data up to 2^32 bytes."""

    def get_col_spec(self):
        # Fixed-name type; never takes a length.
        return "LONGBLOB"
class MSEnum(MSString):
    """MySQL ENUM type."""

    def __init__(self, *enums, **kw):
        """Construct an ENUM.

        Example:

          Column('myenum', MSEnum("foo", "bar", "baz"))

        Arguments are:

        :param enums: The range of valid values for this ENUM.  Values will be
          quoted when generating the schema according to the quoting flag (see
          below).

        :param strict: Defaults to False: ensure that a given value is in this
          ENUM's range of permissible values when inserting or updating rows.
          Note that MySQL will not raise a fatal error if you attempt to store
          an out of range value- an alternate value will be stored instead.
          (See MySQL ENUM documentation.)

        :param charset: Optional, a column-level character set for this string
          value.  Takes precedence to 'ascii' or 'unicode' short-hand.

        :param collation: Optional, a column-level collation for this string
          value.  Takes precedence to 'binary' short-hand.

        :param ascii: Defaults to False: short-hand for the ``latin1``
          character set, generates ASCII in schema.

        :param unicode: Defaults to False: short-hand for the ``ucs2``
          character set, generates UNICODE in schema.

        :param binary: Defaults to False: short-hand, pick the binary
          collation type that matches the column's character set.  Generates
          BINARY in schema.  This does not affect the type of data stored,
          only the collation of character data.

        :param quoting: Defaults to 'auto': automatically determine enum value
          quoting.  If all enum values are surrounded by the same quoting
          character, then use 'quoted' mode.  Otherwise, use 'unquoted' mode.

          'quoted': values in enums are already quoted, they will be used
          directly when generating the schema.

          'unquoted': values in enums are not quoted, they will be escaped and
          surrounded by single quotes when generating the schema.

          Previous versions of this type always required manually quoted
          values to be supplied; future versions will always quote the string
          literals for you.  This is a transitional option.
        """
        self.quoting = kw.pop('quoting', 'auto')

        if self.quoting == 'auto':
            # Sniff the supplied values: if every value starts and ends with
            # the same quote character they are treated as pre-quoted
            # literals, otherwise as raw values.
            q = None
            for e in enums:
                if len(e) == 0:
                    # An empty string can't be pre-quoted.
                    self.quoting = 'unquoted'
                    break
                elif q is None:
                    q = e[0]
                if e[0] != q or e[-1] != q:
                    self.quoting = 'unquoted'
                    break
            else:
                # for/else: loop ran to completion, so all values agreed.
                self.quoting = 'quoted'

        if self.quoting == 'quoted':
            util.warn_pending_deprecation(
                'Manually quoting ENUM value literals is deprecated.  Supply '
                'unquoted values and use the quoting= option in cases of '
                'ambiguity.')
            # Store values unquoted internally: strip the enclosing quotes
            # and collapse doubled quote characters.
            strip_enums = []
            for a in enums:
                if a[0:1] == '"' or a[0:1] == "'":
                    # strip enclosing quotes and unquote interior
                    a = a[1:-1].replace(a[0] * 2, a[0])
                strip_enums.append(a)
            self.enums = strip_enums
        else:
            self.enums = list(enums)

        self.strict = kw.pop('strict', False)
        # Column length is the length of the longest value (0 if empty).
        length = max([len(v) for v in self.enums] + [0])
        super(MSEnum, self).__init__(length, **kw)

    def bind_processor(self, dialect):
        """Optionally validate values against the ENUM's range on bind."""
        super_convert = super(MSEnum, self).bind_processor(dialect)
        def process(value):
            # strict mode rejects out-of-range values before they reach the
            # server (MySQL itself would silently substitute a value).
            if self.strict and value is not None and value not in self.enums:
                raise exc.InvalidRequestError('"%s" not a valid value for '
                                              'this enum' % value)
            if super_convert:
                return super_convert(value)
            else:
                return value
        return process

    def get_col_spec(self):
        # Re-quote the stored (unquoted) values for DDL, doubling any
        # embedded single quotes.
        quoted_enums = []
        for e in self.enums:
            quoted_enums.append("'%s'" % e.replace("'", "''"))
        return self._extend("ENUM(%s)" % ",".join(quoted_enums))
class MSSet(MSString):
    """MySQL SET type."""

    def __init__(self, *values, **kw):
        """Construct a SET.

        Example::

          Column('myset', MSSet("'foo'", "'bar'", "'baz'"))

        Arguments are:

        :param values: The range of valid values for this SET.  Values will be
          used exactly as they appear when generating schemas.  Strings must
          be quoted, as in the example above.  Single-quotes are suggested for
          ANSI compatibility and are required for portability to servers with
          ANSI_QUOTES enabled.

        :param charset: Optional, a column-level character set for this string
          value.  Takes precedence to 'ascii' or 'unicode' short-hand.

        :param collation: Optional, a column-level collation for this string
          value.  Takes precedence to 'binary' short-hand.

        :param ascii: Defaults to False: short-hand for the ``latin1``
          character set, generates ASCII in schema.

        :param unicode: Defaults to False: short-hand for the ``ucs2``
          character set, generates UNICODE in schema.

        :param binary: Defaults to False: short-hand, pick the binary
          collation type that matches the column's character set.  Generates
          BINARY in schema.  This does not affect the type of data stored,
          only the collation of character data.
        """
        # DDL uses the values verbatim (callers supply them pre-quoted);
        # self.values holds the unquoted forms for Python-side use.
        self.__ddl_values = values

        strip_values = []
        for a in values:
            if a[0:1] == '"' or a[0:1] == "'":
                # strip enclosing quotes and unquote interior
                a = a[1:-1].replace(a[0] * 2, a[0])
            strip_values.append(a)

        self.values = strip_values
        # Column length is the length of the longest value (0 if empty).
        length = max([len(v) for v in strip_values] + [0])
        super(MSSet, self).__init__(length, **kw)

    def result_processor(self, dialect):
        """Convert driver results into Python set objects."""
        def process(value):
            # The good news:
            #   No ',' quoting issues- commas aren't allowed in SET values
            # The bad news:
            #   Plenty of driver inconsistencies here.
            if isinstance(value, util.set_types):
                # ..some versions convert '' to an empty set
                if not value:
                    value.add('')
                # ..some return sets.Set, even for pythons that have __builtin__.set
                if not isinstance(value, set):
                    value = set(value)
                return value
            # ...and some versions return strings
            if value is not None:
                return set(value.split(','))
            else:
                return value
        return process

    def bind_processor(self, dialect):
        """Join Python collections into MySQL's comma-separated form."""
        super_convert = super(MSSet, self).bind_processor(dialect)
        def process(value):
            if value is None or isinstance(value, (int, long, basestring)):
                # Scalars pass straight through.
                pass
            else:
                # None as a member maps to the empty-string member.
                if None in value:
                    value = set(value)
                    value.remove(None)
                    value.add('')
                value = ','.join(value)
            if super_convert:
                return super_convert(value)
            else:
                return value
        return process

    def get_col_spec(self):
        # Emit the originally-supplied (pre-quoted) values verbatim.
        return self._extend("SET(%s)" % ",".join(self.__ddl_values))
class MSBoolean(sqltypes.Boolean):
    """MySQL BOOLEAN type, rendered as BOOL in DDL."""

    def get_col_spec(self):
        return "BOOL"

    def result_processor(self, dialect):
        # NULL passes through; everything else collapses to True/False.
        def process(value):
            if value is None:
                return None
            return bool(value)
        return process

    def bind_processor(self, dialect):
        # Booleans bind as 1/0; NULL passes through; other values are
        # coerced through their truthiness.
        def process(value):
            if value is None:
                return None
            if value is True:
                return 1
            if value is False:
                return 0
            return bool(value)
        return process
# Map generic SQLAlchemy type classes to their MySQL-specific
# implementations above; consumed by MySQLDialect.type_descriptor()
# via sqltypes.adapt_type().
colspecs = {
    sqltypes.Integer: MSInteger,
    sqltypes.Smallinteger: MSSmallInteger,
    sqltypes.Numeric: MSNumeric,
    sqltypes.Float: MSFloat,
    sqltypes.DateTime: MSDateTime,
    sqltypes.Date: MSDate,
    sqltypes.Time: MSTime,
    sqltypes.String: MSString,
    sqltypes.Binary: MSBlob,
    sqltypes.Boolean: MSBoolean,
    sqltypes.Text: MSText,
    sqltypes.CHAR: MSChar,
    sqltypes.NCHAR: MSNChar,
    sqltypes.TIMESTAMP: MSTimeStamp,
    sqltypes.BLOB: MSBlob,
    MSDouble: MSDouble,
    MSReal: MSReal,
    _BinaryType: _BinaryType,
}
# Everything 3.23 through 5.1 excepting OpenGIS types.
# Maps server-reported type names (lower-cased, as seen in DESCRIBE /
# information_schema output) to type classes, for table reflection.
ischema_names = {
    'bigint': MSBigInteger,
    'binary': MSBinary,
    'bit': MSBit,
    'blob': MSBlob,
    'boolean': MSBoolean,
    'char': MSChar,
    'date': MSDate,
    'datetime': MSDateTime,
    'decimal': MSDecimal,
    'double': MSDouble,
    'enum': MSEnum,
    'fixed': MSDecimal,
    'float': MSFloat,
    'int': MSInteger,
    'integer': MSInteger,
    'longblob': MSLongBlob,
    'longtext': MSLongText,
    'mediumblob': MSMediumBlob,
    'mediumint': MSMediumInteger,
    'mediumtext': MSMediumText,
    'nchar': MSNChar,
    'nvarchar': MSNVarChar,
    'numeric': MSNumeric,
    'set': MSSet,
    'smallint': MSSmallInteger,
    'text': MSText,
    'time': MSTime,
    'timestamp': MSTimeStamp,
    'tinyblob': MSTinyBlob,
    'tinyint': MSTinyInteger,
    'tinytext': MSTinyText,
    'varbinary': MSVarBinary,
    'varchar': MSString,
    'year': MSYear,
}
class MySQLExecutionContext(default.DefaultExecutionContext):
    # Execution context hooks: fixes up lastrowid after single-row INSERTs
    # and invalidates the cached connection charset after SET statements.

    def post_exec(self):
        if self.compiled.isinsert and not self.executemany:
            # Fill in a missing primary-key id from cursor.lastrowid.
            if (not len(self._last_inserted_ids) or
                    self._last_inserted_ids[0] is None):
                self._last_inserted_ids = ([self.cursor.lastrowid] +
                                           self._last_inserted_ids[1:])
        elif (not self.isupdate and not self.should_autocommit and
              self.statement and SET_RE.match(self.statement)):
            # A SET statement may have changed the session charset; drop the
            # memoized value so it is re-detected on next use.
            # This misses if a user forces autocommit on text('SET NAMES'),
            # which is probably a programming error anyhow.
            self.connection.info.pop(('mysql', 'charset'), None)

    def should_autocommit_text(self, statement):
        # Autocommit for DML/DDL text statements matched by the regex.
        return AUTOCOMMIT_RE.match(statement)
class MySQLDialect(default.DefaultDialect):
"""Details of the MySQL dialect. Not used directly in application code."""
name = 'mysql'
supports_alter = True
supports_unicode_statements = False
# identifiers are 64, however aliases can be 255...
max_identifier_length = 255
supports_sane_rowcount = True
default_paramstyle = 'format'
def __init__(self, use_ansiquotes=None, **kwargs):
self.use_ansiquotes = use_ansiquotes
default.DefaultDialect.__init__(self, **kwargs)
def dbapi(cls):
import MySQLdb as mysql
return mysql
dbapi = classmethod(dbapi)
def create_connect_args(self, url):
opts = url.translate_connect_args(database='db', username='user',
password='passwd')
opts.update(url.query)
util.coerce_kw_type(opts, 'compress', bool)
util.coerce_kw_type(opts, 'connect_timeout', int)
util.coerce_kw_type(opts, 'client_flag', int)
util.coerce_kw_type(opts, 'local_infile', int)
# Note: using either of the below will cause all strings to be returned
# as Unicode, both in raw SQL operations and with column types like
# String and MSString.
util.coerce_kw_type(opts, 'use_unicode', bool)
util.coerce_kw_type(opts, 'charset', str)
# Rich values 'cursorclass' and 'conv' are not supported via
# query string.
ssl = {}
for key in ['ssl_ca', 'ssl_key', 'ssl_cert', 'ssl_capath', 'ssl_cipher']:
if key in opts:
ssl[key[4:]] = opts[key]
util.coerce_kw_type(ssl, key[4:], str)
del opts[key]
if ssl:
opts['ssl'] = ssl
# FOUND_ROWS must be set in CLIENT_FLAGS to enable
# supports_sane_rowcount.
client_flag = opts.get('client_flag', 0)
if self.dbapi is not None:
try:
import MySQLdb.constants.CLIENT as CLIENT_FLAGS
client_flag |= CLIENT_FLAGS.FOUND_ROWS
except:
pass
opts['client_flag'] = client_flag
return [[], opts]
def type_descriptor(self, typeobj):
return sqltypes.adapt_type(typeobj, colspecs)
def do_executemany(self, cursor, statement, parameters, context=None):
rowcount = cursor.executemany(statement, parameters)
if context is not None:
context._rowcount = rowcount
def supports_unicode_statements(self):
return True
def do_commit(self, connection):
"""Execute a COMMIT."""
# COMMIT/ROLLBACK were introduced in 3.23.15.
# Yes, we have at least one user who has to talk to these old versions!
#
# Ignore commit/rollback if support isn't present, otherwise even basic
# operations via autocommit fail.
try:
connection.commit()
except:
if self._server_version_info(connection) < (3, 23, 15):
args = sys.exc_info()[1].args
if args and args[0] == 1064:
return
raise
def do_rollback(self, connection):
"""Execute a ROLLBACK."""
try:
connection.rollback()
except:
if self._server_version_info(connection) < (3, 23, 15):
args = sys.exc_info()[1].args
if args and args[0] == 1064:
return
raise
def do_begin_twophase(self, connection, xid):
connection.execute("XA BEGIN %s", xid)
def do_prepare_twophase(self, connection, xid):
connection.execute("XA END %s", xid)
connection.execute("XA PREPARE %s", xid)
def do_rollback_twophase(self, connection, xid, is_prepared=True,
recover=False):
if not is_prepared:
connection.execute("XA END %s", xid)
connection.execute("XA ROLLBACK %s", xid)
def do_commit_twophase(self, connection, xid, is_prepared=True,
recover=False):
if not is_prepared:
self.do_prepare_twophase(connection, xid)
connection.execute("XA COMMIT %s", xid)
def do_recover_twophase(self, connection):
resultset = connection.execute("XA RECOVER")
return [row['data'][0:row['gtrid_length']] for row in resultset]
def do_ping(self, connection):
connection.ping()
def is_disconnect(self, e):
if isinstance(e, self.dbapi.OperationalError):
return e.args[0] in (2006, 2013, 2014, 2045, 2055)
elif isinstance(e, self.dbapi.InterfaceError): # if underlying connection is closed, this is the error you get
return "(0, '')" in str(e)
else:
return False
def get_default_schema_name(self, connection):
return connection.execute('SELECT DATABASE()').scalar()
get_default_schema_name = engine_base.connection_memoize(
('dialect', 'default_schema_name'))(get_default_schema_name)
def table_names(self, connection, schema):
"""Return a Unicode SHOW TABLES from a given schema."""
charset = self._detect_charset(connection)
self._autoset_identifier_style(connection)
rp = connection.execute("SHOW TABLES FROM %s" %
self.identifier_preparer.quote_identifier(schema))
return [row[0] for row in _compat_fetchall(rp, charset=charset)]
def has_table(self, connection, table_name, schema=None):
# SHOW TABLE STATUS LIKE and SHOW TABLES LIKE do not function properly
# on macosx (and maybe win?) with multibyte table names.
#
# TODO: if this is not a problem on win, make the strategy swappable
# based on platform. DESCRIBE is slower.
# [ticket:726]
# full_name = self.identifier_preparer.format_table(table,
# use_schema=True)
self._autoset_identifier_style(connection)
full_name = '.'.join(self.identifier_preparer._quote_free_identifiers(
schema, table_name))
st = "DESCRIBE %s" % full_name
rs = None
try:
try:
rs = connection.execute(st)
have = rs.rowcount > 0
rs.close()
return have
except exc.SQLError, e:
if e.orig.args[0] == 1146:
return False
raise
finally:
if rs:
rs.close()
def server_version_info(self, connection):
"""A tuple of the database server version.
Formats the remote server version as a tuple of version values,
e.g. ``(5, 0, 44)``. If there are strings in the version number
they will be in the tuple too, so don't count on these all being
``int`` values.
This is a fast check that does not require a round trip. It is also
cached per-Connection.
"""
return self._server_version_info(connection.connection.connection)
server_version_info = engine_base.connection_memoize(
('mysql', 'server_version_info'))(server_version_info)
def _server_version_info(self, dbapi_con):
"""Convert a MySQL-python server_info string into a tuple."""
version = []
r = re.compile('[.\-]')
for n in r.split(dbapi_con.get_server_info()):
try:
version.append(int(n))
except ValueError:
version.append(n)
return tuple(version)
def reflecttable(self, connection, table, include_columns):
"""Load column definitions from the server."""
charset = self._detect_charset(connection)
self._autoset_identifier_style(connection)
try:
reflector = self.reflector
except AttributeError:
preparer = self.identifier_preparer
if (self.server_version_info(connection) < (4, 1) and
self.use_ansiquotes):
# ANSI_QUOTES doesn't affect SHOW CREATE TABLE on < 4.1
preparer = MySQLIdentifierPreparer(self)
self.reflector = reflector = MySQLSchemaReflector(preparer)
sql = self._show_create_table(connection, table, charset)
if sql.startswith('CREATE ALGORITHM'):
# Adapt views to something table-like.
columns = self._describe_table(connection, table, charset)
sql = reflector._describe_to_create(table, columns)
self._adjust_casing(connection, table)
return reflector.reflect(connection, table, sql, charset,
only=include_columns)
def _adjust_casing(self, connection, table, charset=None):
"""Adjust Table name to the server case sensitivity, if needed."""
casing = self._detect_casing(connection)
# For winxx database hosts. TODO: is this really needed?
if casing == 1 and table.name != table.name.lower():
table.name = table.name.lower()
lc_alias = schema._get_table_key(table.name, table.schema)
table.metadata.tables[lc_alias] = table
def _detect_charset(self, connection):
"""Sniff out the character set in use for connection results."""
# Allow user override, won't sniff if force_charset is set.
if ('mysql', 'force_charset') in connection.info:
return connection.info[('mysql', 'force_charset')]
# Note: MySQL-python 1.2.1c7 seems to ignore changes made
# on a connection via set_character_set()
if self.server_version_info(connection) < (4, 1, 0):
try:
return connection.connection.character_set_name()
except AttributeError:
# < 1.2.1 final MySQL-python drivers have no charset support.
# a query is needed.
pass
# Prefer 'character_set_results' for the current connection over the
# value in the driver. SET NAMES or individual variable SETs will
# change the charset without updating the driver's view of the world.
#
# If it's decided that issuing that sort of SQL leaves you SOL, then
# this can prefer the driver value.
rs = connection.execute("SHOW VARIABLES LIKE 'character_set%%'")
opts = dict([(row[0], row[1]) for row in _compat_fetchall(rs)])
if 'character_set_results' in opts:
return opts['character_set_results']
try:
return connection.connection.character_set_name()
except AttributeError:
# Still no charset on < 1.2.1 final...
if 'character_set' in opts:
return opts['character_set']
else:
util.warn(
"Could not detect the connection character set with this "
"combination of MySQL server and MySQL-python. "
"MySQL-python >= 1.2.2 is recommended. Assuming latin1.")
return 'latin1'
_detect_charset = engine_base.connection_memoize(
('mysql', 'charset'))(_detect_charset)
def _detect_casing(self, connection):
"""Sniff out identifier case sensitivity.
Cached per-connection. This value can not change without a server
restart.
"""
# http://dev.mysql.com/doc/refman/5.0/en/name-case-sensitivity.html
charset = self._detect_charset(connection)
row = _compat_fetchone(connection.execute(
"SHOW VARIABLES LIKE 'lower_case_table_names'"),
charset=charset)
if not row:
cs = 0
else:
# 4.0.15 returns OFF or ON according to [ticket:489]
# 3.23 doesn't, 4.0.27 doesn't..
if row[1] == 'OFF':
cs = 0
elif row[1] == 'ON':
cs = 1
else:
cs = int(row[1])
row.close()
return cs
_detect_casing = engine_base.connection_memoize(
('mysql', 'lower_case_table_names'))(_detect_casing)
def _detect_collations(self, connection):
"""Pull the active COLLATIONS list from the server.
Cached per-connection.
"""
collations = {}
if self.server_version_info(connection) < (4, 1, 0):
pass
else:
charset = self._detect_charset(connection)
rs = connection.execute('SHOW COLLATION')
for row in _compat_fetchall(rs, charset):
collations[row[0]] = row[1]
return collations
_detect_collations = engine_base.connection_memoize(
('mysql', 'collations'))(_detect_collations)
def use_ansiquotes(self, useansi):
self._use_ansiquotes = useansi
if useansi:
self.preparer = MySQLANSIIdentifierPreparer
else:
self.preparer = MySQLIdentifierPreparer
# icky
if hasattr(self, 'identifier_preparer'):
self.identifier_preparer = self.preparer(self)
if hasattr(self, 'reflector'):
del self.reflector
use_ansiquotes = property(lambda s: s._use_ansiquotes, use_ansiquotes,
doc="True if ANSI_QUOTES is in effect.")
def _autoset_identifier_style(self, connection, charset=None):
"""Detect and adjust for the ANSI_QUOTES sql mode.
If the dialect's use_ansiquotes is unset, query the server's sql mode
and reset the identifier style.
Note that this currently *only* runs during reflection. Ideally this
would run the first time a connection pool connects to the database,
but the infrastructure for that is not yet in place.
"""
if self.use_ansiquotes is not None:
return
row = _compat_fetchone(
connection.execute("SHOW VARIABLES LIKE 'sql_mode'"),
charset=charset)
if not row:
mode = ''
else:
mode = row[1] or ''
# 4.0
if mode.isdigit():
mode_no = int(mode)
mode = (mode_no | 4 == mode_no) and 'ANSI_QUOTES' or ''
self.use_ansiquotes = 'ANSI_QUOTES' in mode
def _show_create_table(self, connection, table, charset=None,
full_name=None):
"""Run SHOW CREATE TABLE for a ``Table``."""
if full_name is None:
full_name = self.identifier_preparer.format_table(table)
st = "SHOW CREATE TABLE %s" % full_name
rp = None
try:
try:
rp = connection.execute(st)
except exc.SQLError, e:
if e.orig.args[0] == 1146:
raise exc.NoSuchTableError(full_name)
else:
raise
row = _compat_fetchone(rp, charset=charset)
if not row:
raise exc.NoSuchTableError(full_name)
return row[1].strip()
finally:
if rp:
rp.close()
return sql
def _describe_table(self, connection, table, charset=None,
full_name=None):
"""Run DESCRIBE for a ``Table`` and return processed rows."""
if full_name is None:
full_name = self.identifier_preparer.format_table(table)
st = "DESCRIBE %s" % full_name
rp, rows = None, None
try:
try:
rp = connection.execute(st)
except exc.SQLError, e:
if e.orig.args[0] == 1146:
raise exc.NoSuchTableError(full_name)
else:
raise
rows = _compat_fetchall(rp, charset=charset)
finally:
if rp:
rp.close()
return rows
class _MySQLPythonRowProxy(object):
    """Return consistent column values for all versions of MySQL-python.

    Smooth over data type issues (esp. with alpha driver versions) and
    normalize strings as Unicode regardless of user-configured driver
    encoding settings.
    """

    # Some MySQL-python versions can return some columns as
    # sets.Set(['value']) (seriously) but thankfully that doesn't
    # seem to come up in DDL queries.

    def __init__(self, rowproxy, charset):
        self.rowproxy = rowproxy
        self.charset = charset

    def _normalize(self, item):
        # Shared coercion used by both item and attribute access:
        # array -> str, then str -> unicode via the connection charset.
        if isinstance(item, _array):
            item = item.tostring()
        if self.charset and isinstance(item, str):
            return item.decode(self.charset)
        return item

    def __getitem__(self, index):
        return self._normalize(self.rowproxy[index])

    def __getattr__(self, attr):
        return self._normalize(getattr(self.rowproxy, attr))
class MySQLCompiler(compiler.DefaultCompiler):
    # Statement compiler with MySQL-specific operator/function renderings,
    # CAST target restrictions, and LIMIT / locking clause syntax.

    operators = compiler.DefaultCompiler.operators.copy()
    operators.update({
        # MySQL spells string concatenation as concat(), not ||.
        sql_operators.concat_op: lambda x, y: "concat(%s, %s)" % (x, y),
        # '%' is doubled because the DB-API paramstyle is 'format'.
        sql_operators.mod: '%%',
        sql_operators.match_op: lambda x, y: "MATCH (%s) AGAINST (%s IN BOOLEAN MODE)" % (x, y)
    })

    functions = compiler.DefaultCompiler.functions.copy()
    functions.update({
        sql_functions.random: 'rand%(expr)s',
        "utc_timestamp": "UTC_TIMESTAMP"
    })

    extract_map = compiler.DefaultCompiler.extract_map.copy()
    extract_map.update({
        # MySQL's EXTRACT field name has no trailing 's'.
        'milliseconds': 'millisecond',
    })

    def visit_typeclause(self, typeclause):
        # Translate a type into the restricted set of targets accepted by
        # MySQL's CAST(); returns None when no CAST target exists, in which
        # case visit_cast() omits the CAST entirely.
        type_ = typeclause.type.dialect_impl(self.dialect)
        if isinstance(type_, MSInteger):
            if getattr(type_, 'unsigned', False):
                return 'UNSIGNED INTEGER'
            else:
                return 'SIGNED INTEGER'
        elif isinstance(type_, (MSDecimal, MSDateTime, MSDate, MSTime)):
            return type_.get_col_spec()
        elif isinstance(type_, MSText):
            return 'CHAR'
        elif (isinstance(type_, _StringType) and not
              isinstance(type_, (MSEnum, MSSet))):
            if getattr(type_, 'length'):
                return 'CHAR(%s)' % type_.length
            else:
                return 'CHAR'
        elif isinstance(type_, _BinaryType):
            return 'BINARY'
        elif isinstance(type_, MSNumeric):
            return type_.get_col_spec().replace('NUMERIC', 'DECIMAL')
        elif isinstance(type_, MSTimeStamp):
            return 'DATETIME'
        elif isinstance(type_, (MSDateTime, MSDate, MSTime)):
            return type_.get_col_spec()
        else:
            return None

    def visit_cast(self, cast, **kwargs):
        # No cast until 4, no decimals until 5.
        type_ = self.process(cast.typeclause)
        if type_ is None:
            return self.process(cast.clause)

        return 'CAST(%s AS %s)' % (self.process(cast.clause), type_)

    def post_process_text(self, text):
        # Literal '%' in text() fragments must be escaped for the 'format'
        # paramstyle used by the driver.
        if '%%' in text:
            util.warn("The SQLAlchemy MySQLDB dialect now automatically escapes '%' in text() expressions to '%%'.")
        return text.replace('%', '%%')

    def get_select_precolumns(self, select):
        # _distinct may be a plain flag, or a MySQL-specific keyword string
        # (e.g. 'distinctrow') rendered verbatim in upper case.
        if isinstance(select._distinct, basestring):
            return select._distinct.upper() + " "
        elif select._distinct:
            return "DISTINCT "
        else:
            return ""

    def visit_join(self, join, asfrom=False, **kwargs):
        # 'JOIN ... ON ...' for inner joins isn't available until 4.0.
        # Apparently < 3.23.17 requires theta joins for inner joins
        # (but not outer).  Not generating these currently, but
        # support can be added, preferably after dialects are
        # refactored to be version-sensitive.
        return ''.join(
            (self.process(join.left, asfrom=True),
             (join.isouter and " LEFT OUTER JOIN " or " INNER JOIN "),
             self.process(join.right, asfrom=True),
             " ON ",
             self.process(join.onclause)))

    def for_update_clause(self, select):
        # for_update='read' renders MySQL's shared-lock variant.
        if select.for_update == 'read':
            return ' LOCK IN SHARE MODE'
        else:
            return super(MySQLCompiler, self).for_update_clause(select)

    def limit_clause(self, select):
        # MySQL supports:
        #    LIMIT <limit>
        #    LIMIT <offset>, <limit>
        # and in server versions > 3.3:
        #    LIMIT <limit> OFFSET <offset>
        # The latter is more readable for offsets but we're stuck with the
        # former until we can refine dialects by server revision.

        limit, offset = select._limit, select._offset

        if (limit, offset) == (None, None):
            return ''
        elif offset is not None:
            # As suggested by the MySQL docs, need to apply an
            # artificial limit if one wasn't provided
            if limit is None:
                # 2^64 - 1, MySQL's documented "no limit" sentinel.
                limit = 18446744073709551615
            return ' \n LIMIT %s, %s' % (offset, limit)
        else:
            # No offset provided, so just use the limit
            return ' \n LIMIT %s' % (limit,)

    def visit_update(self, update_stmt):
        # Standard UPDATE, plus MySQL's UPDATE ... LIMIT n via the
        # 'mysql_limit' statement keyword argument.
        self.stack.append({'from': set([update_stmt.table])})

        self.isupdate = True
        colparams = self._get_colparams(update_stmt)

        text = "UPDATE " + self.preparer.format_table(update_stmt.table) + " SET " + ', '.join(["%s=%s" % (self.preparer.format_column(c[0]), c[1]) for c in colparams])

        if update_stmt._whereclause:
            text += " WHERE " + self.process(update_stmt._whereclause)

        limit = update_stmt.kwargs.get('mysql_limit', None)
        if limit:
            text += " LIMIT %s" % limit

        self.stack.pop(-1)

        return text
# ug. "InnoDB needs indexes on foreign keys and referenced keys [...].
# Starting with MySQL 4.1.2, these indexes are created automatically.
# In older versions, the indexes must be created explicitly or the
# creation of foreign key constraints fails."
class MySQLSchemaGenerator(compiler.SchemaGenerator):
    # DDL generator producing MySQL-flavored CREATE TABLE output.
    def get_column_specification(self, column, first_pk=False):
        """Builds column DDL."""
        colspec = [self.preparer.format_column(column),
                   column.type.dialect_impl(self.dialect).get_col_spec()]
        default = self.get_column_default_string(column)
        if default is not None:
            colspec.append('DEFAULT ' + default)
        if not column.nullable:
            colspec.append('NOT NULL')
        # AUTO_INCREMENT is emitted only for the first integer,
        # non-foreign-key, autoincrement-eligible primary key column.
        if column.primary_key and column.autoincrement:
            try:
                first = [c for c in column.table.primary_key.columns
                         if (c.autoincrement and
                             isinstance(c.type, sqltypes.Integer) and
                             not c.foreign_keys)].pop(0)
                if column is first:
                    colspec.append('AUTO_INCREMENT')
            except IndexError:
                # No eligible auto-increment column exists at all.
                pass
        return ' '.join(colspec)
    def post_create_table(self, table):
        """Build table-level CREATE options like ENGINE and COLLATE."""
        table_opts = []
        for k in table.kwargs:
            if k.startswith('mysql_'):
                opt = k[6:].upper()
                joiner = '='
                # A few options are space-joined instead of '='-joined.
                if opt in ('TABLESPACE', 'DEFAULT CHARACTER SET',
                           'CHARACTER SET', 'COLLATE'):
                    joiner = ' '
                table_opts.append(joiner.join((opt, table.kwargs[k])))
        return ' '.join(table_opts)
class MySQLSchemaDropper(compiler.SchemaDropper):
    # DDL dropper emitting MySQL-specific DROP syntax.
    def visit_index(self, index):
        # MySQL requires the owning table name in DROP INDEX.
        self.append("\nDROP INDEX %s ON %s" %
                    (self.preparer.quote(self._validate_identifier(index.name, False), index.quote),
                     self.preparer.format_table(index.table)))
        self.execute()
    def drop_foreignkey(self, constraint):
        # Foreign keys are dropped via ALTER TABLE, not DROP CONSTRAINT.
        self.append("ALTER TABLE %s DROP FOREIGN KEY %s" %
                    (self.preparer.format_table(constraint.table),
                     self.preparer.format_constraint(constraint)))
        self.execute()
class MySQLSchemaReflector(object):
"""Parses SHOW CREATE TABLE output."""
    def __init__(self, identifier_preparer):
        """Construct a MySQLSchemaReflector.
        identifier_preparer
          An ANSIIdentifierPreparer type, used to determine the identifier
          quoting style in effect.
        """
        self.preparer = identifier_preparer
        # Pre-compile all parsing regexes once per reflector instance.
        self._prep_regexes()
    def reflect(self, connection, table, show_create, charset, only=None):
        """Parse MySQL SHOW CREATE TABLE and fill in a ''Table''.
        show_create
          Unicode output of SHOW CREATE TABLE
        table
          A ''Table'', to be loaded with Columns, Indexes, etc.
          table.name will be set if not already
        charset
          FIXME, some constructed values (like column defaults)
          currently can't be Unicode. ''charset'' will convert them
          into the connection character set.
        only
          An optional sequence of column names. If provided, only
          these columns will be reflected, and any keys or constraints
          that include columns outside this set will also be omitted.
          That means that if ``only`` includes only one column in a
          2 part primary key, the entire primary key will be omitted.
        """
        # Keys and constraints are collected while scanning and applied
        # only after all columns exist on the Table.
        keys, constraints = [], []
        if only:
            only = set(only)
        # Dispatch each line of the DDL to a specialized handler based
        # on its leading characters.
        for line in re.split(r'\r?\n', show_create):
            if line.startswith(' ' + self.preparer.initial_quote):
                self._add_column(table, line, charset, only)
            # a regular table options line
            elif line.startswith(') '):
                self._set_options(table, line)
            # an ANSI-mode table options line
            elif line == ')':
                pass
            elif line.startswith('CREATE '):
                self._set_name(table, line)
            # Not present in real reflection, but may be if loading from a file.
            elif not line:
                pass
            else:
                type_, spec = self.parse_constraints(line)
                if type_ is None:
                    util.warn("Unknown schema content: %r" % line)
                elif type_ == 'key':
                    keys.append(spec)
                elif type_ == 'constraint':
                    constraints.append(spec)
                else:
                    # 'partition' specs are recognized but ignored.
                    pass
        self._set_keys(table, keys, only)
        self._set_constraints(table, constraints, connection, only)
def _set_name(self, table, line):
"""Override a Table name with the reflected name.
table
A ``Table``
line
The first line of SHOW CREATE TABLE output.
"""
# Don't override by default.
if table.name is None:
table.name = self.parse_name(line)
    def _add_column(self, table, line, charset, only=None):
        """Parse one column line of SHOW CREATE TABLE and append a Column.

        Warns (rather than raising) on unparseable or unknown column
        definitions so reflection degrades gracefully.
        """
        spec = self.parse_column(line)
        if not spec:
            util.warn("Unknown column definition %r" % line)
            return
        if not spec['full']:
            # Loose-parse fallback succeeded; some attributes may be lost.
            util.warn("Incomplete reflection of column definition %r" % line)
        name, type_, args, notnull = \
              spec['name'], spec['coltype'], spec['arg'], spec['notnull']
        if only and name not in only:
            self.logger.info("Omitting reflected column %s.%s" %
                             (table.name, name))
            return
        # Convention says that TINYINT(1) columns == BOOLEAN
        if type_ == 'tinyint' and args == '1':
            type_ = 'boolean'
            args = None
        try:
            col_type = ischema_names[type_]
        except KeyError:
            util.warn("Did not recognize type '%s' of column '%s'" %
                      (type_, name))
            col_type = sqltypes.NullType
        # Column type positional arguments eg. varchar(32)
        if args is None or args == '':
            type_args = []
        elif args[0] == "'" and args[-1] == "'":
            # Quoted args, e.g. the member list of an ENUM/SET.
            type_args = self._re_csv_str.findall(args)
        else:
            # Numeric args, e.g. length/precision.
            type_args = [int(v) for v in self._re_csv_int.findall(args)]
        # Column type keyword options
        type_kw = {}
        for kw in ('unsigned', 'zerofill'):
            if spec.get(kw, False):
                type_kw[kw] = True
        for kw in ('charset', 'collate'):
            if spec.get(kw, False):
                type_kw[kw] = spec[kw]
        if type_ == 'enum':
            type_kw['quoting'] = 'quoted'
        type_instance = col_type(*type_args, **type_kw)
        col_args, col_kw = [], {}
        # NOT NULL
        if spec.get('notnull', False):
            col_kw['nullable'] = False
        # AUTO_INCREMENT
        if spec.get('autoincr', False):
            col_kw['autoincrement'] = True
        elif issubclass(col_type, sqltypes.Integer):
            # Integer columns without AUTO_INCREMENT are explicitly
            # marked non-autoincrementing.
            col_kw['autoincrement'] = False
        # DEFAULT
        default = spec.get('default', None)
        if default is not None and default != 'NULL':
            # Defaults should be in the native charset for the moment
            default = default.encode(charset)
            if type_ == 'timestamp':
                # can't be NULL for TIMESTAMPs
                if (default[0], default[-1]) != ("'", "'"):
                    default = sql.text(default)
            else:
                # Strip the surrounding quotes from the literal default.
                default = default[1:-1]
            col_args.append(schema.DefaultClause(default))
        table.append_column(schema.Column(name, type_instance,
                                          *col_args, **col_kw))
def _set_keys(self, table, keys, only):
"""Add ``Index`` and ``PrimaryKeyConstraint`` items to a ``Table``.
Most of the information gets dropped here- more is reflected than
the schema objects can currently represent.
table
A ``Table``
keys
A sequence of key specifications produced by `constraints`
only
Optional `set` of column names. If provided, keys covering
columns not in this set will be omitted.
"""
for spec in keys:
flavor = spec['type']
col_names = [s[0] for s in spec['columns']]
if only and not set(col_names).issubset(only):
if flavor is None:
flavor = 'index'
self.logger.info(
"Omitting %s KEY for (%s), key covers ommitted columns." %
(flavor, ', '.join(col_names)))
continue
constraint = False
if flavor == 'PRIMARY':
key = schema.PrimaryKeyConstraint()
constraint = True
elif flavor == 'UNIQUE':
key = schema.Index(spec['name'], unique=True)
elif flavor in (None, 'FULLTEXT', 'SPATIAL'):
key = schema.Index(spec['name'])
else:
self.logger.info(
"Converting unknown KEY type %s to a plain KEY" % flavor)
key = schema.Index(spec['name'])
for col in [table.c[name] for name in col_names]:
key.append_column(col)
if constraint:
table.append_constraint(key)
def _set_constraints(self, table, constraints, connection, only):
"""Apply constraints to a ``Table``."""
default_schema = None
for spec in constraints:
# only FOREIGN KEYs
ref_name = spec['table'][-1]
ref_schema = len(spec['table']) > 1 and spec['table'][-2] or table.schema
if not ref_schema:
if default_schema is None:
default_schema = connection.dialect.get_default_schema_name(
connection)
if table.schema == default_schema:
ref_schema = table.schema
loc_names = spec['local']
if only and not set(loc_names).issubset(only):
self.logger.info(
"Omitting FOREIGN KEY for (%s), key covers ommitted "
"columns." % (', '.join(loc_names)))
continue
ref_key = schema._get_table_key(ref_name, ref_schema)
if ref_key in table.metadata.tables:
ref_table = table.metadata.tables[ref_key]
else:
ref_table = schema.Table(
ref_name, table.metadata, schema=ref_schema,
autoload=True, autoload_with=connection)
ref_names = spec['foreign']
if ref_schema:
refspec = [".".join([ref_schema, ref_name, column]) for column in ref_names]
else:
refspec = [".".join([ref_name, column]) for column in ref_names]
con_kw = {}
for opt in ('name', 'onupdate', 'ondelete'):
if spec.get(opt, False):
con_kw[opt] = spec[opt]
key = schema.ForeignKeyConstraint(loc_names, refspec, link_to_name=True, **con_kw)
table.append_constraint(key)
def _set_options(self, table, line):
"""Apply safe reflected table options to a ``Table``.
table
A ``Table``
line
The final line of SHOW CREATE TABLE output.
"""
options = self.parse_table_options(line)
for nope in ('auto_increment', 'data_directory', 'index_directory'):
options.pop(nope, None)
for opt, val in options.items():
table.kwargs['mysql_%s' % opt] = val
def _prep_regexes(self):
"""Pre-compile regular expressions."""
self._re_columns = []
self._pr_options = []
self._re_options_util = {}
_final = self.preparer.final_quote
quotes = dict(zip(('iq', 'fq', 'esc_fq'),
[re.escape(s) for s in
(self.preparer.initial_quote,
_final,
self.preparer._escape_identifier(_final))]))
self._pr_name = _pr_compile(
r'^CREATE (?:\w+ +)?TABLE +'
r'%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +\($' % quotes,
self.preparer._unescape_identifier)
# `col`,`col2`(32),`col3`(15) DESC
#
# Note: ASC and DESC aren't reflected, so we'll punt...
self._re_keyexprs = _re_compile(
r'(?:'
r'(?:%(iq)s((?:%(esc_fq)s|[^%(fq)s])+)%(fq)s)'
r'(?:\((\d+)\))?(?=\,|$))+' % quotes)
# 'foo' or 'foo','bar' or 'fo,o','ba''a''r'
self._re_csv_str = _re_compile(r'\x27(?:\x27\x27|[^\x27])*\x27')
# 123 or 123,456
self._re_csv_int = _re_compile(r'\d+')
# `colname` <type> [type opts]
# (NOT NULL | NULL)
# DEFAULT ('value' | CURRENT_TIMESTAMP...)
# COMMENT 'comment'
# COLUMN_FORMAT (FIXED|DYNAMIC|DEFAULT)
# STORAGE (DISK|MEMORY)
self._re_column = _re_compile(
r' '
r'%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +'
r'(?P<coltype>\w+)'
r'(?:\((?P<arg>(?:\d+|\d+,\d+|'
r'(?:\x27(?:\x27\x27|[^\x27])*\x27,?)+))\))?'
r'(?: +(?P<unsigned>UNSIGNED))?'
r'(?: +(?P<zerofill>ZEROFILL))?'
r'(?: +CHARACTER SET +(?P<charset>\w+))?'
r'(?: +COLLATE +(P<collate>\w+))?'
r'(?: +(?P<notnull>NOT NULL))?'
r'(?: +DEFAULT +(?P<default>'
r'(?:NULL|\x27(?:\x27\x27|[^\x27])*\x27|\w+)'
r'(?:ON UPDATE \w+)?'
r'))?'
r'(?: +(?P<autoincr>AUTO_INCREMENT))?'
r'(?: +COMMENT +(P<comment>(?:\x27\x27|[^\x27])+))?'
r'(?: +COLUMN_FORMAT +(?P<colfmt>\w+))?'
r'(?: +STORAGE +(?P<storage>\w+))?'
r'(?: +(?P<extra>.*))?'
r',?$'
% quotes
)
# Fallback, try to parse as little as possible
self._re_column_loose = _re_compile(
r' '
r'%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +'
r'(?P<coltype>\w+)'
r'(?:\((?P<arg>(?:\d+|\d+,\d+|\x27(?:\x27\x27|[^\x27])+\x27))\))?'
r'.*?(?P<notnull>NOT NULL)?'
% quotes
)
# (PRIMARY|UNIQUE|FULLTEXT|SPATIAL) INDEX `name` (USING (BTREE|HASH))?
# (`col` (ASC|DESC)?, `col` (ASC|DESC)?)
# KEY_BLOCK_SIZE size | WITH PARSER name
self._re_key = _re_compile(
r' '
r'(?:(?P<type>\S+) )?KEY'
r'(?: +%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s)?'
r'(?: +USING +(?P<using_pre>\S+))?'
r' +\((?P<columns>.+?)\)'
r'(?: +USING +(?P<using_post>\S+))?'
r'(?: +KEY_BLOCK_SIZE +(?P<keyblock>\S+))?'
r'(?: +WITH PARSER +(?P<parser>\S+))?'
r',?$'
% quotes
)
# CONSTRAINT `name` FOREIGN KEY (`local_col`)
# REFERENCES `remote` (`remote_col`)
# MATCH FULL | MATCH PARTIAL | MATCH SIMPLE
# ON DELETE CASCADE ON UPDATE RESTRICT
#
# unique constraints come back as KEYs
kw = quotes.copy()
kw['on'] = 'RESTRICT|CASCASDE|SET NULL|NOACTION'
self._re_constraint = _re_compile(
r' '
r'CONSTRAINT +'
r'%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +'
r'FOREIGN KEY +'
r'\((?P<local>[^\)]+?)\) REFERENCES +'
r'(?P<table>%(iq)s[^%(fq)s]+%(fq)s(?:\.%(iq)s[^%(fq)s]+%(fq)s)?) +'
r'\((?P<foreign>[^\)]+?)\)'
r'(?: +(?P<match>MATCH \w+))?'
r'(?: +ON DELETE (?P<ondelete>%(on)s))?'
r'(?: +ON UPDATE (?P<onupdate>%(on)s))?'
% kw
)
# PARTITION
#
# punt!
self._re_partition = _re_compile(
r' '
r'(?:SUB)?PARTITION')
# Table-level options (COLLATE, ENGINE, etc.)
for option in ('ENGINE', 'TYPE', 'AUTO_INCREMENT',
'AVG_ROW_LENGTH', 'CHARACTER SET',
'DEFAULT CHARSET', 'CHECKSUM',
'COLLATE', 'DELAY_KEY_WRITE', 'INSERT_METHOD',
'MAX_ROWS', 'MIN_ROWS', 'PACK_KEYS', 'ROW_FORMAT',
'KEY_BLOCK_SIZE'):
self._add_option_word(option)
for option in (('COMMENT', 'DATA_DIRECTORY', 'INDEX_DIRECTORY',
'PASSWORD', 'CONNECTION')):
self._add_option_string(option)
self._add_option_regex('UNION', r'\([^\)]+\)')
self._add_option_regex('TABLESPACE', r'.*? STORAGE DISK')
self._add_option_regex('RAID_TYPE',
r'\w+\s+RAID_CHUNKS\s*\=\s*\w+RAID_CHUNKSIZE\s*=\s*\w+')
self._re_options_util['='] = _re_compile(r'\s*=\s*$')
def _add_option_string(self, directive):
regex = (r'(?P<directive>%s\s*(?:=\s*)?)'
r'(?:\x27.(?P<val>.*?)\x27(?!\x27)\x27)' %
re.escape(directive))
self._pr_options.append(
_pr_compile(regex, lambda v: v.replace("''", "'")))
def _add_option_word(self, directive):
regex = (r'(?P<directive>%s\s*(?:=\s*)?)'
r'(?P<val>\w+)' % re.escape(directive))
self._pr_options.append(_pr_compile(regex))
def _add_option_regex(self, directive, regex):
regex = (r'(?P<directive>%s\s*(?:=\s*)?)'
r'(?P<val>%s)' % (re.escape(directive), regex))
self._pr_options.append(_pr_compile(regex))
def parse_name(self, line):
"""Extract the table name.
line
The first line of SHOW CREATE TABLE
"""
regex, cleanup = self._pr_name
m = regex.match(line)
if not m:
return None
return cleanup(m.group('name'))
def parse_column(self, line):
"""Extract column details.
Falls back to a 'minimal support' variant if full parse fails.
line
Any column-bearing line from SHOW CREATE TABLE
"""
m = self._re_column.match(line)
if m:
spec = m.groupdict()
spec['full'] = True
return spec
m = self._re_column_loose.match(line)
if m:
spec = m.groupdict()
spec['full'] = False
return spec
return None
    def parse_constraints(self, line):
        """Parse a KEY or CONSTRAINT line.
        line
          A line of SHOW CREATE TABLE output
        """
        # Returns a (type, spec) 2-tuple where type is one of 'key',
        # 'constraint', 'partition', or None for unrecognized lines.
        # KEY
        m = self._re_key.match(line)
        if m:
            spec = m.groupdict()
            # convert columns into name, length pairs
            spec['columns'] = self._parse_keyexprs(spec['columns'])
            return 'key', spec
        # CONSTRAINT
        m = self._re_constraint.match(line)
        if m:
            spec = m.groupdict()
            spec['table'] = \
                self.preparer.unformat_identifiers(spec['table'])
            # Key expressions yield (name, length) pairs; only the names
            # are kept for the local and foreign column lists.
            spec['local'] = [c[0]
                             for c in self._parse_keyexprs(spec['local'])]
            spec['foreign'] = [c[0]
                               for c in self._parse_keyexprs(spec['foreign'])]
            return 'constraint', spec
        # PARTITION and SUBPARTITION
        m = self._re_partition.match(line)
        if m:
            # Punt!
            return 'partition', line
        # No match.
        return (None, line)
def parse_table_options(self, line):
"""Build a dictionary of all reflected table-level options.
line
The final line of SHOW CREATE TABLE output.
"""
options = {}
if not line or line == ')':
return options
r_eq_trim = self._re_options_util['=']
for regex, cleanup in self._pr_options:
m = regex.search(line)
if not m:
continue
directive, value = m.group('directive'), m.group('val')
directive = r_eq_trim.sub('', directive).lower()
if cleanup:
value = cleanup(value)
options[directive] = value
return options
    def _describe_to_create(self, table, columns):
        """Re-format DESCRIBE output as a SHOW CREATE TABLE string.
        DESCRIBE is a much simpler reflection and is sufficient for
        reflecting views for runtime use. This method formats DDL
        for columns only- keys are omitted.
        `columns` is a sequence of DESCRIBE or SHOW COLUMNS 6-tuples.
        SHOW FULL COLUMNS FROM rows must be rearranged for use with
        this function.
        """
        buffer = []
        for row in columns:
            # Indices 0,1,2,4,5 = name, type, nullable, default, extra;
            # index 3 (the key column) is intentionally skipped.
            (name, col_type, nullable, default, extra) = \
              [row[i] for i in (0, 1, 2, 4, 5)]
            line = [' ']
            line.append(self.preparer.quote_identifier(name))
            line.append(col_type)
            if not nullable:
                line.append('NOT NULL')
            if default:
                if 'auto_increment' in default:
                    # AUTO_INCREMENT is not a literal default; drop it.
                    pass
                elif (col_type.startswith('timestamp') and
                      default.startswith('C')):
                    # CURRENT_TIMESTAMP defaults pass through unquoted.
                    line.append('DEFAULT')
                    line.append(default)
                elif default == 'NULL':
                    line.append('DEFAULT')
                    line.append(default)
                else:
                    line.append('DEFAULT')
                    line.append("'%s'" % default.replace("'", "''"))
            if extra:
                line.append(extra)
            buffer.append(' '.join(line))
        return ''.join([('CREATE TABLE %s (\n' %
                         self.preparer.quote_identifier(table.name)),
                        ',\n'.join(buffer),
                        '\n) '])
def _parse_keyexprs(self, identifiers):
"""Unpack '"col"(2),"col" ASC'-ish strings into components."""
return self._re_keyexprs.findall(identifiers)
# Attach a module-named logger to MySQLSchemaReflector (self.logger).
log.class_logger(MySQLSchemaReflector)
class _MySQLIdentifierPreparer(compiler.IdentifierPreparer):
    """MySQL-specific schema identifier configuration."""
    # MySQL's reserved-word list differs from the ANSI default.
    reserved_words = RESERVED_WORDS
    def __init__(self, dialect, **kw):
        super(_MySQLIdentifierPreparer, self).__init__(dialect, **kw)
    def _quote_free_identifiers(self, *ids):
        """Unilaterally identifier-quote any number of strings."""
        # None entries are dropped rather than quoted.
        return tuple([self.quote_identifier(i) for i in ids if i is not None])
class MySQLIdentifierPreparer(_MySQLIdentifierPreparer):
    """Traditional MySQL-specific schema identifier configuration."""
    def __init__(self, dialect):
        # Traditional (non-ANSI_QUOTES) mode quotes with backticks.
        super(MySQLIdentifierPreparer, self).__init__(dialect, initial_quote="`")
    def _escape_identifier(self, value):
        # An embedded backtick is escaped by doubling it.
        return value.replace('`', '``')
    def _unescape_identifier(self, value):
        return value.replace('``', '`')
class MySQLANSIIdentifierPreparer(_MySQLIdentifierPreparer):
    """ANSI_QUOTES MySQL schema identifier configuration."""
    # Inherits the base quoting behavior unchanged.
    pass
def _compat_fetchall(rp, charset=None):
    """Proxy result rows to smooth over MySQL-Python driver inconsistencies."""
    rows = rp.fetchall()
    return [_MySQLPythonRowProxy(row, charset) for row in rows]
def _compat_fetchone(rp, charset=None):
    """Proxy a result row to smooth over MySQL-Python driver inconsistencies."""
    row = rp.fetchone()
    return _MySQLPythonRowProxy(row, charset)
def _pr_compile(regex, cleanup=None):
    """Prepare a 2-tuple of compiled regex and callable."""
    compiled = _re_compile(regex)
    return (compiled, cleanup)
def _re_compile(regex):
"""Compile a string to regex, I and UNICODE."""
return re.compile(regex, re.I | re.UNICODE)
# Wire the default dialect class up with its compiler and DDL
# collaborators; consumers import `dialect` from this module.
dialect = MySQLDialect
dialect.statement_compiler = MySQLCompiler
dialect.schemagenerator = MySQLSchemaGenerator
dialect.schemadropper = MySQLSchemaDropper
dialect.execution_ctx_cls = MySQLExecutionContext
| [
"root@ip-10-118-137-129.ec2.internal"
] | root@ip-10-118-137-129.ec2.internal |
efed41830ae32c4bd43678d92ff17f433ecfa92d | f12024bf505b270177ad24e7acc9c9980f0ad1a8 | /aidants_connect_web/migrations/0002_auto_20220511_1735.py | a7511994fa2c6e1a4487fd2519a07eb859755a02 | [
"MIT"
] | permissive | betagouv/Aidants_Connect | 18be11e542d3071e516c32b357e9dbab927ecedb | 228ff94f5f3696ed8384992d7a8c9a3adbe6eeda | refs/heads/main | 2023-08-31T02:49:05.557288 | 2023-08-29T09:10:14 | 2023-08-29T09:10:14 | 180,139,859 | 22 | 15 | MIT | 2023-09-13T09:37:51 | 2019-04-08T12:02:11 | Python | UTF-8 | Python | false | false | 664 | py | # Generated by Django 3.2.12 on 2022-05-11 15:35
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds PIX-test tracking fields to the HabilitationRequest model.
    dependencies = [
        ('aidants_connect_web', '0001_20220329_stable_schema'),
    ]
    operations = [
        migrations.AddField(
            model_name='habilitationrequest',
            name='date_test_pix',
            field=models.DateTimeField(blank=True, null=True, verbose_name='Date test PIX'),
        ),
        migrations.AddField(
            model_name='habilitationrequest',
            name='test_pix_passed',
            field=models.BooleanField(default=False, verbose_name='Test PIX'),
        ),
    ]
| [
"liviapribeiro@gmail.com"
] | liviapribeiro@gmail.com |
68b565789058c8489d1540a33c82ab5cffbc36d4 | bf362dbe80929d1f6bd6600a91b9bb6882c4120c | /train.py | 7e1bd37241ad09f38cf71be7b93dfa12dc898bee | [] | no_license | Neclow/ee559-project2 | 9cfd5a3ba46745b3672ea10794e6fe0d62f98f01 | 0753602e5fff6c25f3d4597b6e55ffd790e1eed5 | refs/heads/master | 2023-03-06T04:48:03.477123 | 2021-02-23T21:36:17 | 2021-02-23T21:36:17 | 260,314,599 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,709 | py | import time
import torch
from loss import *
from metrics import compute_accuracy
from optim import *
from utils import load_data
def train(model, trainX, trainY, input_criterion = 'mse', input_optimizer = 'sgd',
nb_epochs = 250, eta = 1e-3, mini_batch_size = 100, verbose = False):
'''
Network training
Parameters
-------
model
Neural network to train
trainX
Training data examples
trainY
Training data labels
input_criterion
String to choose loss function
'mse': MSE loss
'cross': Cross-entropy loss
input_optimizer
String to choose optimizer
'sgd': SGD
'mom': SGD with momentum (0.9)
'adam': Adam
nb_epochs
Number of training epochs
eta
Learning rate
mini_batch_size
Size of mini-batch during training
verbose
If true, prints loss every 10 epochs
Returns
-------
Computed training loss at each epoch
'''
# Disable Pytorch autograd
torch.set_grad_enabled(False)
if input_criterion == 'mse':
criterion = MSELoss()
elif input_criterion == 'cross':
criterion = CrossEntropyLoss()
else:
raise ValueError('Criterion not found. Available: "mse" for MSE loss, "cross" for cross-entropy loss.')
if input_optimizer == 'sgd':
optimizer = SGD(model, eta, momentum=0)
elif input_optimizer == 'mom':
optimizer = SGD(model, eta, momentum=0.9)
elif input_optimizer == 'adam':
optimizer = Adam(model, eta)
else:
raise ValueError('Optimizer not found. Available: "sgd" for SGD, "mom" for SGD with momentum, "adam" for Adam optimization.')
losses = torch.zeros(nb_epochs)
# Enable training mode
model.train()
for e in range(nb_epochs):
loss = 0
for b in range(0, trainX.size(0), mini_batch_size):
# Forward pass
out = model(trainX.narrow(0, b, mini_batch_size))
# Compute loss
running_loss = criterion(out, trainY.narrow(0, b, mini_batch_size))
loss += running_loss
# Backward pass
model.zero_grad()
model.backward(criterion.backward())
optimizer.step()
if verbose:
if (e+1) % 10 == 0:
print('Epoch %d/%d: loss = %.4f' % (e+1, nb_epochs, loss))
# Collect loss data
losses[e] = loss
return losses
def trial(net, n_trials = 30, input_criterion = 'mse', input_optimizer = 'sgd',
n_epochs = 250, eta = 1e-3, start_seed = 0, verbose = False, save_data = False):
'''
Perform a trial on a network, i.e. several rounds of training.
Parameters
-------
net
The neural network
n_trials
Number of trainings to perform (Default: 30)
input_criterion
String to choose loss function
'mse': MSE loss
'cross': Cross-entropy loss
input_optimizer
String to choose optimizer
'sgd': SGD
'mom': SGD with momentum (0.9)
'adam': Adam
n_epochs
Number of training epochs (Default: 250)
eta
Learning rate
start_seed
Indicates from where seeds are generated.
start_seed = 0 with 20 trials means that seeds will be 0, ..., 19
verbose
If true, prints final loss, training accuracy and test accuracy for each trial
Train verbose flag can be set to True to also log the loss every 10 epochs
save_data
If true, saves train and test accuracies as a tensor of size (n_trials,) in a .pt file
Can be used to perform later statistical analyses (e.g. test differences of mean between configurations), if needed (not used for this project)
Returns
-------
all_losses
Training losses accumulated at each epoch for each trial
tr_accuracies
Final train accuracy reported at the end of each trial
te_accuracies
Final test accuracy reported at the end of each trial
'''
all_losses = torch.zeros((n_trials, n_epochs))
tr_accuracies = torch.zeros(n_trials)
te_accuracies = torch.zeros(n_trials)
for i in range(n_trials):
# Load data
torch.manual_seed(start_seed+i)
trainX, trainY, testX, testY = load_data()
# Enable training mode and reset weights
net.train()
net.weight_initialization()
# Train
start = time.time()
tr_loss = train(net, trainX, trainY, input_criterion,
input_optimizer, n_epochs, eta, verbose = False)
print('Trial %d/%d... Training time: %.2f s' % (i+1, n_trials, time.time()-start))
# Collect data
all_losses[i] = tr_loss
# Compute train and test accuracy
net.eval() # Dropout layers are disabled in eval mode
with torch.no_grad():
tr_accuracies[i] = compute_accuracy(net, trainX, trainY)
te_accuracies[i] = compute_accuracy(net, testX, testY)
if verbose:
print('Loss: %.4f, Train acc: %.4f, Test acc: %.4f' %
(tr_loss[-1], tr_accuracies[i], te_accuracies[i]))
# Print trial results
print('Train accuracy - mean: %.4f, std: %.4f, median: %.4f' %
(tr_accuracies.mean(), tr_accuracies.std(), tr_accuracies.median()))
print('Test accuracy - mean: %.4f, std: %.4f, median: %.4f' %
(te_accuracies.mean(), te_accuracies.std(), te_accuracies.median()))
if save_data:
torch.save(tr_accuracies, f'train_{input_optimizer}_{input_criterion}_{len(net)}.pt')
torch.save(te_accuracies, f'test_{input_optimizer}_{input_criterion}_{len(net)}.pt')
| [
"neil.scheidwasser-clow@epfl.ch"
] | neil.scheidwasser-clow@epfl.ch |
b34d5bebd57109d20aee7fec341878dfb3c9875c | 31eaed64b0caeda5c5fe3603609402034e6eb7be | /python_zumbi/py_web/test_scraping_2.py | 8504ae20c38d531160f7f991a12e83e59ccd487b | [] | no_license | RaphaelfsOliveira/workspace_python | 93657b581043176ecffb5783de208c0a00924832 | 90959697687b9398cc48146461750942802933b3 | refs/heads/master | 2021-01-11T17:39:49.574875 | 2017-06-28T20:55:43 | 2017-06-28T20:55:43 | 79,814,783 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 344 | py | import urllib.request #modulo que permite conversar com a internet
pagina = urllib.request.urlopen(
'http://beans.itcarlow.ie/prices-loyalty.html')
text = pagina.read().decode('utf8')
print(text)
i = text.find('>$')
preco = float(text[i+2:i+6])
if preco < 4.74:
print('Em Promoção: ', preco)
else:
print('Está Caro!!: ', preco)
| [
"raphaelbrf@gmail.com"
] | raphaelbrf@gmail.com |
6fa0a724f104e22e21a81398bf7856c9112ccc71 | 5b75ec6cf092e94bd359ffce19c483e71b8e7163 | /3_Selenium/2-2.py | 7e2e762bac8f8f844dc671253dd1d0d7de0c0a05 | [] | no_license | Jeongyoungwang/1_Lecture | ef443a25191b83328daca2122a418b84a966369f | 8459629c9904573b7157c11738e789e2a1d4b0be | refs/heads/master | 2020-05-03T12:32:39.601764 | 2019-03-31T03:02:17 | 2019-03-31T03:02:17 | 178,629,236 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 694 | py | import sys
import io
import requests
sys.stdout = io.TextIOWrapper(sys.stdout.detach(), encoding = 'utf-8')
sys.stderr = io.TextIOWrapper(sys.stderr.detach(), encoding = 'utf-8')
#Response 상태코드
s = requests.Session()
r = s.get('http://httpbin.org/get')
#print(r.status_code)
#print(r.ok)
#https://jsonplaceholder.typicode.com
r = s.get('https://jsonplaceholder.typicode.com/posts/1')
#print(r.text)
print(r.json())
print(r.json().keys())
print(r.json().values()) #키값 제외하고, 벨류값만 출력
print(r.encoding) #한글 깨지는거 방지 진짜 중요
print(r.content) #바이너리 형태의 데이터로 가져옴
print(r.raw) #로우 형태의 데이터로 가져옴
| [
"sumer3@naver.com"
] | sumer3@naver.com |
23e52be0c8ce3adfba088e27013ec2ded693edb9 | db5030b0ef8de107af5161b44e4cb0a03e97ff18 | /My codes/csv format/T.py | 358b85c80dd0e3e06ae77858af67589d0c55d216 | [] | no_license | saadmaan/Thesis- | c7f7bed9993e7f731930740845f5c402ca4c4736 | 5ae789aa64dae484c8efd6a839854a78465905e1 | refs/heads/master | 2021-02-28T03:06:42.518424 | 2020-05-17T17:41:21 | 2020-05-17T17:41:21 | 245,657,300 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 212 | py | import pandas as pd
df = pd.read_csv('F:\THesis\Dataset\Crime.csv')
saved_column = df['Start Date/Time'] #you can also use df['column_name']
print(saved_column)
for row in saved_column:
str(row);
c = row[0]
| [
"saadmanhabib26@gmail.com"
] | saadmanhabib26@gmail.com |
d0686bbf88f5f164a24afb5e2449f189d6ba2b4b | 54f352a242a8ad6ff5516703e91da61e08d9a9e6 | /Source Codes/AtCoder/abc008/B/4886377.py | 4b2e8bb008ccb62443ac42cbdabfef1b5a1468e8 | [] | no_license | Kawser-nerd/CLCDSA | 5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb | aee32551795763b54acb26856ab239370cac4e75 | refs/heads/master | 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null | UTF-8 | Python | false | false | 147 | py | import collections
N = int(input())
names = [input() for i in range(N)]
max_ele = collections.Counter(names)
print(max_ele.most_common()[0][0]) | [
"kwnafi@yahoo.com"
] | kwnafi@yahoo.com |
463a971b29b02dbb0551e83fda61984330b49500 | e9a08404751aee4891ed01fc7ef42c05761e853a | /attendanceChkIn_OUT.py | 5b50f76cd1bfae65c13ab12c2a0801639a124bd1 | [] | no_license | kathirm/TNPLTRPA | 8eed651ff46a02bcfe5de48153ec198751ee6c20 | db170bd8ad686543188c6aac82d2e9c12c72b0e7 | refs/heads/master | 2022-12-17T10:26:27.049538 | 2020-07-24T09:35:43 | 2020-07-24T09:35:43 | 171,221,067 | 0 | 0 | null | 2022-12-08T06:21:54 | 2019-02-18T05:35:17 | Python | UTF-8 | Python | false | false | 3,093 | py | import datetime , json, sys, requests
from random import randrange
def date_backward(token, uId, chkType, inpttime):
try:
dtlist = [] #get_backward date
tmlist = [] #get_time's Random
headers = {"Authorization" : "Bearer %s"%token}
cur = datetime.date.today()
curdte = str(cur.strftime("%d-%m-%Y"))
datem = cur.strftime("%m-%Y")
print "Today Date :: %s Current month & Year :: %s"%(curdte, datem)
prvdte = str("01-"+"%s")%(datem)
curRange = cur.strftime("%d")
dterange = int(curRange)-1
start = datetime.datetime.strptime(prvdte, "%d-%m-%Y")
end = datetime.datetime.strptime(curdte, "%d-%m-%Y")
date_array = (start + datetime.timedelta(days=x) for x in range(0, (end-start).days))
for date_object in date_array:
dte = (date_object.strftime("%Y-%m-%d"))
dtlist.append(dte)
startDate = datetime.datetime(2013, 9, 20,inpttime,00)
for x in time_generator(startDate, dterange):
time = x.strftime("%H:%M:00")
tmlist.append(time)
totalList = []
for i in range(dterange):
total = {}
total["Date"] = dtlist[i]
total["time"] = tmlist[i]
totalList.append(total)
for i in totalList:
date = i.get("Date")
time = i.get("time")
genurl = "http://controller:8080/attendance?uId=%s&%s=%sT%s.948Z"%(uId, chkType, date, time)
resp = requests.post(genurl, headers=headers)
print "Response :: %s CheckType :: %s uId :: %s Date&time :: %s %s"%(resp, chkType, uId, date, time)
except Exception as er:
print "date generator Exception Error :: %s"%er
def time_generator(start, l):
    # Yield l+1 datetimes, each 0-9 random minutes after the previous one.
    current = start
    while l >= 0:
        current = current + datetime.timedelta(minutes=randrange(10))
        yield current
        l-=1
#startDate = datetime.datetime(2013, 9, 20,10,00)
#for x in time_generator(startDate, 25):
# print x.strftime("%H:%M")
def login(tenatName, uId, chkType, inpttime):
try:
token = None
pwd = "!changeme!"
login_url = "http://controller:8080/auth/login?username=Admin@%s&password=%s" %(tenatName, pwd)
headers = []
token = requests.get(login_url, headers)
if token.text is not None:
token_dict= json.loads(token.text)
if 'access-token' in token_dict:
token = token_dict['access-token']
print "retrived token for the user " + tenatName + " with token: " + token
date_backward(token, uId, chkType, inpttime)
except Exception as er:
print "Login Exception :: %s"%er
return token
if __name__ == "__main__":
if len(sys.argv) != 5:
print "Usage: %s <1.tenantName, 2.uId, 3.AttType, 4.InputTime>" % (sys.argv[0])
sys.exit(0);
tntName = sys.argv[1]
uId = sys.argv[2]
chkType = sys.argv[3]
inpttime = int(sys.argv[4])
token = login(tntName, uId, chkType, inpttime)
| [
"mkathir@terafastnet.com"
] | mkathir@terafastnet.com |
bfc7634cdd1a75ce104e75f79c628ef609e282d3 | e1b47553d8bdbff498e3ed441f2be9e5f250c91d | /python/hrank/algo/dynamic/The_Longest_Increasing_Subsequence/lis.py | a582885252130aea1cb57d7074f885642aa5e4e7 | [] | no_license | ahmed-sharif/misc | 55b776823fc922886d4981085289e05c4be8a213 | 2ad06b132408ecfc7ac79b1cdca06588b94ef76c | refs/heads/master | 2020-04-15T14:03:38.214577 | 2017-10-25T18:00:51 | 2017-10-25T18:01:39 | 57,257,631 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 410 | py | # https://www.hackerrank.com/challenges/longest-increasing-subsequent
def compute_lis(a):
    """Return the length of the longest strictly increasing subsequence of *a*.

    Classic O(n^2) dynamic programme: lis[i] holds the length of the longest
    increasing subsequence that ends at index i.

    Fix: returns 0 for an empty sequence; the original called max() on an
    empty list and raised ValueError.
    """
    n = len(a)
    if n == 0:
        return 0
    lis = [1] * n
    for i in range(1, n):
        for j in range(i):
            # Extend the best subsequence ending at j if a[i] can follow it.
            if a[i] > a[j]:
                lis[i] = max(lis[i], lis[j] + 1)
    return max(lis)
# Read the input sequence: first line is the count, then one integer per line
# (Python 2: raw_input / print statement).
a = []
n = int(raw_input().strip())
for _ in range(n):
    a.append(int(raw_input().strip()))
print compute_lis(a)
| [
"asharif@linkedin.com"
] | asharif@linkedin.com |
a6bd02271c9b2f71284783392b9407f1e9b79f7f | 67f1e3d3ac20baf3530ef1ccfc3feca0de466c16 | /python/entrance/connection/cli.py | 750ed6822e0a02eab0aaa1213b253c601b550170 | [
"MIT"
] | permissive | ensoft/entrance | 6f4ae3598407fc945207bec8eeae6d7b736fc654 | 87d67e32511cab4be6b80012aa853ebbbc72d48f | refs/heads/master | 2023-03-18T06:44:17.964792 | 2023-03-02T13:16:52 | 2023-03-02T13:16:52 | 145,552,487 | 11 | 9 | MIT | 2023-02-17T16:28:29 | 2018-08-21T11:08:17 | Python | UTF-8 | Python | false | false | 1,922 | py | # Base class for a CLI ThreadedConnection
#
# Copyright (c) 2018 Ensoft Ltd
import re
from entrance.connection.threaded import ThreadedConnection
class ThreadedCLIConnection(ThreadedConnection):
    """
    Base class for a ThreadedConnection whose worker thread
    maintains a CLI session
    """
    async def send(self, data, override=False):
        """
        Send some data into the connection
        """
        return await self._request("send", override, data)
    async def recv(self, nbytes=0, override=False):
        """
        Wait for some data from the connection. Note that this will cause the
        worker thread to block until some data is available. If nbytes == 0 then
        get all the data available at first shot.
        """
        return await self._request("recv", override, nbytes)
    async def settimeout(self, timeout, override=False):
        """
        Set a timeout on send/recv operations. If hit, recv will just return a
        shorter or empty response. Sends will silently drop.
        """
        return await self._request("settimeout", override, timeout)
    # Regexps for expect_prompt below: _prompt captures everything before an
    # IOS XR-style prompt ("RP/0/RP0/CPU0:...#"); _interesting drops the first
    # line plus the following "... UTC" timestamp line from command output.
    _prompt = re.compile(r"(.*)RP/0/(RP)?0/CPU0:[^\r\n]*?#", re.DOTALL)
    _interesting = re.compile(r"[^\n]*\n[^\n]* UTC\r\n(.*)", re.DOTALL)
    async def expect_prompt(self, strip_top=False, override=False):
        """
        Waits for a prompt, and returns all the characters up to that point
        (optionally also stripping off an initial line and timestamp)
        """
        buf = bytes()
        while True:
            # Accumulate raw bytes until the prompt regex matches.
            # NOTE(review): buf.decode() assumes the buffer never ends in the
            # middle of a multi-byte sequence; a partial character would raise.
            buf += await self.recv(override=override)
            m = self._prompt.match(buf.decode())
            if m:
                result = m.group(1)
                if strip_top:
                    # Optionally strip the echoed command + timestamp header.
                    m = self._interesting.match(result)
                    if m:
                        result = m.group(1)
                return result
"code@chatts.net"
] | code@chatts.net |
a8e62b0c6869bc28fe1c9890457dd16af7d1ceb5 | 2656adb154039d0e1393979b225c8fe82584c00c | /render.py | 9fa244baa6a04e8da35dc5ce9eb487d7ba818f51 | [
"Apache-2.0"
] | permissive | TOKUJI/BlackBull | b90221832884db9f5cbd5a49ff3fb7951dc1ca9a | 92eb736c01464f4a76e88ce3f60e244906eb7dbb | refs/heads/master | 2022-12-08T17:30:37.163685 | 2021-06-13T08:27:47 | 2021-06-13T08:27:47 | 164,662,920 | 1 | 0 | Apache-2.0 | 2022-12-08T07:48:42 | 2019-01-08T14:11:26 | Python | UTF-8 | Python | false | false | 794 | py | from mako.template import Template
def render_login_page(data=None):
    """Render the static login page template; *data* is accepted but unused."""
    page = Template(filename='templates/login.html')
    return page.render_unicode()
def render_dummy_page(data=None):
    """Render the dummy page, passing *data* straight through to the template."""
    page = Template(filename='templates/dummy.html', module_directory='templates/modules/')
    return page.render_unicode(data=data)
def render_table_page(columns, data):
    """Render the table page with the given column headers and row data."""
    page = Template(filename='templates/table.html', module_directory='templates/modules/')
    return page.render_unicode(columns=columns, data=data)
def render_403_page():
    """Render the 403 page; fall back to a plain string if rendering fails."""
    try:
        page = Template(filename='templates/403.html', module_directory='templates/modules/')
        rendered = page.render_unicode()
    except Exception:
        return 'Failed to render_403_page'
    return rendered
| [
"14055040+TOKUJI@users.noreply.github.com"
] | 14055040+TOKUJI@users.noreply.github.com |
bdcc367d50d850b9415469d0b80cd63c73ec7513 | 3a6a211ea0d32405497fbd6486c490bb147e25f9 | /third_party/webtest/webtest/http.py | 890ef96ff90e6543783dd70d2c2b7b0768054a2f | [
"BSD-3-Clause",
"MIT"
] | permissive | catapult-project/catapult | e2cbdd5eb89f3b1492fc8752494e62ea1df4bae0 | 53102de187a48ac2cfc241fef54dcbc29c453a8e | refs/heads/main | 2021-05-25T07:37:22.832505 | 2021-05-24T08:01:49 | 2021-05-25T06:07:38 | 33,947,548 | 2,032 | 742 | BSD-3-Clause | 2022-08-26T16:01:18 | 2015-04-14T17:49:05 | HTML | UTF-8 | Python | false | false | 4,240 | py | # -*- coding: utf-8 -*-
"""
This module contains some helpers to deal with the real http
world.
"""
import threading
import logging
import select
import socket
import time
import os
import six
import webob
from six.moves import http_client
from waitress.server import TcpWSGIServer
def get_free_port():
    """Return ``(host, port)`` where *port* is a free TCP port picked by the OS.

    The host comes from $WEBTEST_SERVER_BIND (default ``127.0.0.1``); the
    address the kernel reports for the throwaway socket is deliberately
    discarded.
    """
    probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    probe.bind(('', 0))
    _, port = probe.getsockname()
    probe.close()
    host = os.environ.get('WEBTEST_SERVER_BIND', '127.0.0.1')
    return host, port
def check_server(host, port, path_info='/', timeout=3, retries=30):
"""Perform a request until the server reply"""
if retries < 0:
return 0
time.sleep(.3)
for i in range(retries):
try:
conn = http_client.HTTPConnection(host, int(port), timeout=timeout)
conn.request('GET', path_info)
res = conn.getresponse()
return res.status
except (socket.error, http_client.HTTPException):
time.sleep(.3)
return 0
class StopableWSGIServer(TcpWSGIServer):
    """StopableWSGIServer is a TcpWSGIServer which run in a separated thread.
    This allow to use tools like casperjs or selenium.
    Server instance have an ``application_url`` attribute formated with the
    server host and port.
    """
    # Set to True by shutdown() so run() can distinguish the expected
    # select.error caused by tearing down the sockets from a real failure.
    was_shutdown = False
    def __init__(self, application, *args, **kwargs):
        super(StopableWSGIServer, self).__init__(self.wrapper, *args, **kwargs)
        self.runner = None
        self.test_app = application
        self.application_url = 'http://%s:%s/' % (self.adj.host, self.adj.port)
    def wrapper(self, environ, start_response):
        """Wrap the wsgi application to override some path:
        ``/__application__``: allow to ping the server.
        ``/__file__?__file__={path}``: serve the file found at ``path``
        """
        if '__file__' in environ['PATH_INFO']:
            req = webob.Request(environ)
            resp = webob.Response()
            resp.content_type = 'text/html; charset=UTF-8'
            filename = req.params.get('__file__')
            if os.path.isfile(filename):
                # NOTE(review): the file handle is never closed explicitly.
                body = open(filename, 'rb').read()
                # Rewrite absolute localhost links so they hit this server.
                body = body.replace(six.b('http://localhost/'),
                                    six.b('http://%s/' % req.host))
                resp.body = body
            else:
                resp.status = '404 Not Found'
            return resp(environ, start_response)
        elif '__application__' in environ['PATH_INFO']:
            # Health-check endpoint used by wait().
            return webob.Response('server started')(environ, start_response)
        return self.test_app(environ, start_response)
    def run(self):
        """Run the server"""
        try:
            self.asyncore.loop(.5, map=self._map)
        except select.error:  # pragma: no cover
            # Expected when shutdown() closes the sockets under the loop.
            if not self.was_shutdown:
                raise
    def shutdown(self):
        """Shutdown the server"""
        # avoid showing traceback related to asyncore
        self.was_shutdown = True
        self.logger.setLevel(logging.FATAL)
        # Close every registered channel until the asyncore map drains.
        while self._map:
            triggers = list(self._map.values())
            for trigger in triggers:
                trigger.handle_close()
        self.maintenance(0)
        self.task_dispatcher.shutdown()
        return True
    @classmethod
    def create(cls, application, **kwargs):
        """Start a server to serve ``application``. Return a server
        instance."""
        host, port = get_free_port()
        # Fill in defaults without clobbering caller-supplied values.
        if 'port' not in kwargs:
            kwargs['port'] = port
        if 'host' not in kwargs:
            kwargs['host'] = host
        if 'expose_tracebacks' not in kwargs:
            kwargs['expose_tracebacks'] = True
        server = cls(application, **kwargs)
        # Serve from a daemon thread so the interpreter can always exit.
        server.runner = threading.Thread(target=server.run)
        server.runner.daemon = True
        server.runner.start()
        return server
    def wait(self, retries=30):
        """Wait until the server is started"""
        running = check_server(self.adj.host, self.adj.port,
                               '/__application__', retries=retries)
        if running:
            return True
        try:
            self.shutdown()
        finally:
            # NOTE(review): returning inside finally swallows any exception
            # raised by shutdown() -- presumably deliberate best-effort
            # cleanup; confirm.
            return False
| [
"qyearsley@google.com"
] | qyearsley@google.com |
2e95edb992349cc95441512bef5344b238ed4afd | c3c2af25c3269e200d2773ec9f8800f4f9a20165 | /backend/manage.py | 42924076b3f1daf8f7bf76a1488f43e45b84b567 | [] | no_license | crowdbotics-apps/divine-hill-27443 | a39ecac7c1c5f510d00bf4e300acea3e46ecca24 | f6abe52a7080da59cc99b1fb01a039933f273a2c | refs/heads/master | 2023-04-26T09:05:39.002510 | 2021-05-26T19:29:02 | 2021-05-26T19:29:02 | 371,147,367 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 637 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Django's command-line entry point for administrative tasks."""
    # Point Django at this project's settings unless already configured.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'divine_hill_27443.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a friendlier hint (standard Django boilerplate).
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
    main()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
c654094c0fdeb50abb400c1c1d0e67fc97c80eb7 | 36a0b764f6a899ce381aa5c4e69257d6c6f7f65b | /meiduo_mall/utils/fdfs/MyFileStorage.py | 32fa6af0e25bb0339eb83bd0ea424b77dca016d0 | [] | no_license | create6/meiduo_ub | ec23b1b0e88f784dacbd5c18cbe5c83799e2b88c | 4651f21e9669b0e8224004cb596a08195109677d | refs/heads/master | 2020-05-31T08:10:57.178001 | 2019-06-28T02:15:02 | 2019-06-28T02:15:02 | 188,504,868 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,246 | py | from django.conf import settings
from django.core.files.storage import Storage
from fdfs_client.client import Fdfs_client
"""
自定义文件存储类(官方文档-视图层-文件上传-自定义存储):
1, 定义类继承自Storage
2, 必须保证参数能够初始化
3, 必须实现open,save方法
"""
class MyStorage(Storage):
    """Custom Django storage backend that uploads files to FastDFS.

    Per the Django custom-storage contract, the class is constructible
    without arguments and implements open() and save().
    """
    def __init__(self, base_url=None):
        # Fall back to the project-wide FastDFS base URL from settings.
        if not base_url:
            base_url = settings.BASE_URL
        self.base_url = base_url
    def open(self, name, mode='rb'):
        """Called when a file is opened; reading back is not supported here."""
        pass
    def save(self, name, content, max_length=None):
        """Called when a file is saved: upload its bytes to FastDFS.

        Returns the remote file id on success, or None if the upload failed.
        """
        # Build a FastDFS client from the tracker configuration file.
        client = Fdfs_client(settings.FDFS_CONFIG)
        result = client.upload_by_buffer(content.read())
        # Check whether the upload succeeded (status string comes from
        # fdfs_client, including its "successed" spelling).
        if result["Status"] != "Upload successed.":
            return None
        # Extract the remote file id of the stored image.
        image_url = result["Remote file_id"]
        # Return the file id; Django stores it as the file's name.
        return image_url
    def exists(self, name):
        """Called to check whether *name* already exists; always returns None
        (falsy), so uploads are never blocked by a duplicate name."""
        pass
    def url(self, name):
        """Return the absolute URL for the stored file."""
        return self.base_url + name
"1362254116@qq.com"
] | 1362254116@qq.com |
4e52cc987f5b50af5d9b223ce2923ca558ff6980 | 3d2d5ecb7377410d309737412febce208138b583 | /1-Regular Expression/count_num_of_number.py | 9a505e1802aab7af797e54e4d9c1947ea8948616 | [] | no_license | rika77/access_web_data | fd938c8cac4f7ac8bdabdc511ec5034bb6d5f003 | 2c76f5a482d08c655d637944014076c9752e7f2d | refs/heads/master | 2020-09-05T13:57:23.726304 | 2019-11-07T01:39:06 | 2019-11-07T01:39:06 | 220,126,264 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 153 | py | import re
with open('regex_sum_180748.txt') as f:
s = f.read()
nums = re.findall('[0-9]+', s)
ans = 0
for num in nums:
ans += int(num)
print(ans)
| [
"mahimahi.kametan@gmail.com"
] | mahimahi.kametan@gmail.com |
b9a0d4a8c907d64a769984ce54c21e598bceb55a | 857fc21a40aa32d2a57637de1c723e4ab51062ff | /PythonChallenge/Ex05/05_01.py | 93d0926dda5e81b77d9190b1a9e433c954140ed4 | [
"MIT"
] | permissive | YorkFish/git_study | efa0149f94623d685e005d58dbaef405ab91d541 | 6e023244daaa22e12b24e632e76a13e5066f2947 | refs/heads/master | 2021-06-21T18:46:50.906441 | 2020-12-25T14:04:04 | 2020-12-25T14:04:04 | 171,432,914 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 117 | py | #!/usr/bin/env python3
# coding:utf-8
from pickle import load
with open("banner.p", "rb") as f:
print(load(f))
| [
"18258788231@163.com"
] | 18258788231@163.com |
626bb65d7cd1ec6e7fc65aef68d875d25f67e8f3 | 3c6b5cdad2099125c9713041f21a14f41693f5b3 | /init/tar.py | 24de4fc9a420da40a63d3639ad7617086615fe08 | [] | no_license | fudashuai/SetDisplayMode | 6d41d625e17c755d7ad913182fce83288353f5b0 | 23183820cbb4d9d20ea6aacfaa4e21bc30fd7cb5 | refs/heads/main | 2023-06-15T07:42:21.198382 | 2021-07-18T05:38:22 | 2021-07-18T05:38:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,122 | py | #!/usr/bin/env/python3
# -*- coding:utf-8 -*-
import os
import platform
import shutil
import tempfile
import time
from distutils.core import setup
from pathlib import Path
from subprocess import PIPE, run
from zipfile import ZipFile
def pack_pyd(cur_dir, excepts=('launch.py',)):
    """Cython-compile every .py file under *cur_dir* into a binary module.

    Each source file whose name is not listed in *excepts* is compiled in
    place via ``cythonize`` + ``setup``; the resulting
    ``name.cpython-XY.pyd/.so`` artefacts are renamed to plain ``name.pyd`` /
    ``name.so``, and the consumed .py/.c sources and temp build directories
    are removed best-effort.

    Fix: the default for *excepts* was the bare string ``('launch.py')``
    (missing comma), so ``file.name not in excepts`` did *substring* matching
    and e.g. a file named ``h.py`` was silently skipped. The default is now a
    real one-element tuple.
    """
    # Make sure Cython is importable, installing it on demand.
    while True:
        try:
            from Cython.Build import cythonize
            break
        except ImportError:
            run('pip install cython', shell=True, stdout=PIPE, stderr=PIPE)
            time.sleep(5)
    build_dir_list = set()   # directories that received compiled artefacts
    temp_dir_list = set()    # per-file temp build directories to delete
    temp_file_list = set()   # .py/.c sources to delete after compiling
    for file in cur_dir.rglob('*.py'):
        if file.name not in excepts:
            _file = str(file)
            build_dir = file.parent
            _build_dir = str(build_dir)
            build_temp_dir = build_dir / 'temp'
            _build_temp_dir = str(build_temp_dir)
            try:
                # Emit the extension module next to its source file.
                setup(ext_modules=cythonize([_file]),
                      script_args=[
                          "build_ext", "-b", _build_dir, "-t", _build_temp_dir
                      ])
                build_dir_list.add(build_dir)
                temp_file_list.add(file)
                temp_file_list.add(file.with_suffix('.c'))
                temp_dir_list.add(build_temp_dir)
            except Exception as e:
                print(f'error occurred during pack {file}, message: {e}')
    # Strip the ABI tag: "name.cpython-39.pyd" -> "name.pyd".
    for build_dir in build_dir_list:
        for file in build_dir.iterdir():
            if file.suffix in ('.pyd', '.so'):
                name_list = file.name.split('.')
                new_name = file.with_name(f'{name_list[0]}.{name_list[-1]}')
                os.rename(file, new_name)
    # Best-effort cleanup of temp build dirs and consumed sources.
    for temp_dir in temp_dir_list:
        try:
            shutil.rmtree(temp_dir)
        except Exception as e:
            print(e)
    for temp_file in temp_file_list:
        try:
            os.remove(temp_file)
        except Exception as e:
            print(e)
def tar():
    """Copy the project into a temp dir and zip it next to the project root.

    VCS/IDE metadata and the runtime data dirs (db, input, log, output) are
    excluded from the copy; empty log/ and output/ dirs are recreated in the
    staging area so the archive ships them.  Relies on the module-level
    ``root_dir`` assigned in the __main__ block.
    """
    temp_dir = Path(tempfile.mkdtemp())
    # Stage everything except VCS/IDE metadata and runtime data directories.
    for file in root_dir.iterdir():
        if file.name not in ('.git', '.venv', '.vscode', 'db', 'input', 'log',
                             'output'):
            src = file
            dst = temp_dir / file.name
            if file.is_file():
                shutil.copy(src, dst)
            else:
                shutil.copytree(src, dst)
    db_dir = temp_dir / 'db'
    input_dir = temp_dir / 'input'
    log_dir = temp_dir / 'log'
    output_dir = temp_dir / 'output'
    # NOTE(review): db_dir and input_dir are computed but missing from the
    # tuple below, so they are never created -- confirm whether intended.
    dirs = (log_dir, output_dir)
    for dir in dirs:
        dir.mkdir(parents=True, exist_ok=True)
    app_path = root_dir / (root_dir.name + '.zip')
    # chdir into the staging dir so archive members get relative paths.
    os.chdir(temp_dir)
    with ZipFile(app_path, 'w') as z_file:
        path = Path()
        for file in path.rglob('*'):
            z_file.write(file)
    # On Windows, pop open an Explorer window at the archive's location.
    os_platform = platform.system()
    if os_platform == 'Windows':
        run(f'EXPLORER {root_dir}', shell=True, stdout=PIPE, stderr=PIPE)
if __name__ == '__main__':
    # Resolve the project root: two levels above this file (init/tar.py).
    pwd = Path(__file__).resolve()
    root_dir = pwd.parent.parent
    core_dir = root_dir / 'core'
    # Optional pre-step (disabled): compile core/ to binary modules first.
    # pack_pyd(core_dir)
    tar()
| [
"funchan@msn.cn"
] | funchan@msn.cn |
3ed12bf18a7b64349018b7c6e07ad53b57159f1b | a0a656cefd97b6f6416a8f6f38d1ed8524589929 | /add_assets/serializers.py | 0ef70f1a4ded1da53789566550b3661a1cafa9bd | [] | no_license | zhixingchou/opsmanage-copy-add_assets | c932bf2641ff408be1d43bbf96b50ad2f47acb81 | c120889f9d1aa74de3f3e33f787aa5b9343e9011 | refs/heads/master | 2021-06-28T23:21:42.458019 | 2017-09-13T05:37:31 | 2017-09-13T05:37:31 | 103,357,803 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 853 | py | # coding:utf-8
from rest_framework import serializers
from appone.models import *
from django.contrib.auth.models import Group,User
class AssetsSerializer(serializers.ModelSerializer):
    """DRF serializer exposing the basic fields of an Assets record."""
    class Meta:
        model = Assets
        fields = ('id','assets_type','management_ip')
class ServerSerializer(serializers.ModelSerializer):
    """DRF serializer for Server_Assets with an optional nested Assets."""
    # Nested, writable representation of the related Assets row.
    assets = AssetsSerializer(required=False)
    class Meta:
        model = Server_Assets
        fields = ('id','ip','hostname','username','port','passwd','cpu','assets')
    def create(self, data):
        """Create a Server_Assets row, creating/attaching its Assets first."""
        if(data.get('assets')):
            # Nested payload supplied: persist the Assets record.
            assets_data = data.pop('assets')
            assets = Assets.objects.create(**assets_data)
        else:
            # NOTE(review): an *unsaved* Assets() instance is assigned as the
            # related object; recent Django versions reject unsaved related
            # objects on create -- confirm this branch is exercised.
            assets = Assets()
        data['assets'] = assets;
        server = Server_Assets.objects.create(**data)
        return server
"376616017@qq.com"
] | 376616017@qq.com |
e4b3cbe089af89580345befdfa21e0813a578041 | f7779fc2b88a0f4ad66b1e941f057f9f2be91f73 | /apps/casts/migrations/0006_auto__add_unique_castsservices_name.py | 8faa9175262fba990db3336940348c3e933a6dd2 | [] | no_license | tumani1/vsevi | e30664c1573996bcd825e57e7b8137345c0858e7 | 6504b7689573055405ee044b2d75c31cb1ab0a2d | refs/heads/master | 2020-07-21T22:33:44.384719 | 2014-11-07T15:28:10 | 2014-11-07T15:28:10 | 35,594,443 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,157 | py | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding unique constraint on 'CastsServices', fields ['name']
db.create_unique('casts_services', ['name'])
def backwards(self, orm):
# Removing unique constraint on 'CastsServices', fields ['name']
db.delete_unique('casts_services', ['name'])
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'casts.abstractcaststags': {
'Meta': {'object_name': 'AbstractCastsTags', 'db_table': "'abstract_casts_tags'"},
'description': ('django.db.models.fields.TextField', [], {'max_length': '255', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'db_index': 'True', 'blank': 'True'}),
'name_orig': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'db_index': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'db_index': 'True', 'blank': 'True'})
},
'casts.casts': {
'Meta': {'object_name': 'Casts', 'db_table': "'casts'"},
'description': ('django.db.models.fields.TextField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'duration': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pg_rating': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'search_index': ('djorm_pgfulltext.fields.VectorField', [], {'default': "''", 'null': 'True', 'db_index': 'True'}),
'start': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'casts'", 'symmetrical': 'False', 'to': "orm['casts.AbstractCastsTags']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'title_orig': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
},
'casts.castschats': {
'Meta': {'object_name': 'CastsChats', 'db_table': "'casts_chats'"},
'cast': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['casts.Casts']", 'unique': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {})
},
'casts.castschatsmsgs': {
'Meta': {'object_name': 'CastsChatsMsgs', 'db_table': "'casts_chats_msgs'"},
'cast': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['casts.Casts']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'casts.castschatsusers': {
'Meta': {'object_name': 'CastsChatsUsers', 'db_table': "'casts_chats_users'"},
'blocked': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'cast': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['casts.Casts']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'db_index': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'casts.castsextras': {
'Meta': {'object_name': 'CastsExtras', 'db_table': "'extras_casts'"},
'cast': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['casts.Casts']"}),
'extra': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['films.FilmExtras']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'casts.castslocations': {
'Meta': {'object_name': 'CastsLocations', 'db_table': "'casts_locations'"},
'cast': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'cl_location_rel'", 'to': "orm['casts.Casts']"}),
'cast_service': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['casts.CastsServices']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'offline': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'price': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'price_type': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'quality': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'db_index': 'True', 'blank': 'True'}),
'url_view': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'db_index': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'db_index': 'True', 'blank': 'True'})
},
'casts.castsservices': {
'Meta': {'object_name': 'CastsServices', 'db_table': "'casts_services'"},
'description': ('django.db.models.fields.TextField', [], {'max_length': '255', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'casts_services'", 'symmetrical': 'False', 'to': "orm['casts.AbstractCastsTags']"}),
'update': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'db_index': 'True', 'blank': 'True'})
},
'casts.userscasts': {
'Meta': {'object_name': 'UsersCasts', 'db_table': "'users_casts'"},
'cast': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['casts.Casts']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'rating': ('django.db.models.fields.PositiveSmallIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'subscribed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'films.cities': {
'Meta': {'object_name': 'Cities', 'db_table': "'cities'"},
'country': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'cities'", 'to': "orm['films.Countries']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'name_orig': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'films.countries': {
'Meta': {'object_name': 'Countries', 'db_table': "'countries'"},
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'name_orig': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'films.filmextras': {
'Meta': {'object_name': 'FilmExtras', 'db_table': "'films_extras'"},
'description': ('django.db.models.fields.TextField', [], {}),
'film': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fe_film_rel'", 'to': "orm['films.Films']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'name_orig': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'photo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'films.films': {
'Meta': {'object_name': 'Films', 'db_table': "'films'"},
'age_limit': ('django.db.models.fields.PositiveSmallIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'budget': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'countries': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'countries'", 'symmetrical': 'False', 'to': "orm['films.Countries']"}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'duration': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'genres': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'genres'", 'symmetrical': 'False', 'to': "orm['films.Genres']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'imdb_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'kinopoisk_id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'db_index': 'True'}),
'kinopoisk_lastupdate': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'name_orig': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'db_index': 'True', 'blank': 'True'}),
'persons': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'persons'", 'symmetrical': 'False', 'through': "orm['films.PersonsFilms']", 'to': "orm['films.Persons']"}),
'rating_cons': ('django.db.models.fields.FloatField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'rating_cons_cnt': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'rating_imdb': ('django.db.models.fields.FloatField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'rating_imdb_cnt': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'rating_kinopoisk': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'rating_kinopoisk_cnt': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rating_local': ('django.db.models.fields.FloatField', [], {'default': '0', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'rating_local_cnt': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'rating_sort': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'release_date': ('django.db.models.fields.DateField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'search_index': ('djorm_pgfulltext.fields.VectorField', [], {'default': "''", 'null': 'True', 'db_index': 'True'}),
'seasons_cnt': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'was_shown': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'world_release_date': ('django.db.models.fields.DateField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
},
'films.genres': {
'Meta': {'object_name': 'Genres', 'db_table': "'genres'"},
'depth': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'hidden': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'rgt': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'films.persons': {
'Meta': {'object_name': 'Persons', 'db_table': "'persons'"},
'bio': ('django.db.models.fields.TextField', [], {}),
'birthdate': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'persons'", 'null': 'True', 'to': "orm['films.Cities']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kinopoisk_id': ('django.db.models.fields.IntegerField', [], {'unique': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'name_orig': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'photo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
'films.personsfilms': {
'Meta': {'unique_together': "(('film', 'person', 'p_type'),)", 'object_name': 'PersonsFilms', 'db_table': "'persons_films'"},
'description': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255'}),
'film': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pf_films_rel'", 'to': "orm['films.Films']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'p_character': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255'}),
'p_index': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'p_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pf_persons_rel'", 'to': "orm['films.Persons']"})
}
}
complete_apps = ['casts'] | [
"dlyubimov@aaysm.com"
] | dlyubimov@aaysm.com |
716cb29adc3becdd4a5965eee788155597749d7b | c618cbb638e1c69e08ad26df505971d8f33b100c | /profiles/migrations/0001_initial.py | 598e94cc6d9faf816a5297467fe411cbb294bb0e | [] | no_license | mikeemorales/house-plant | bf612d461459429cd1898d9ba7e2e8f6d90544a3 | 3b76df1c6464a22f1cd76b1f650d2cfe584e61ed | refs/heads/master | 2023-08-23T03:23:27.836900 | 2021-09-18T03:22:34 | 2021-09-18T03:22:34 | 339,876,234 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,413 | py | # Generated by Django 3.1.6 on 2021-02-25 06:55
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django_countries.fields
class Migration(migrations.Migration):
    """Initial migration for the ``profiles`` app.

    Creates the ``UserProfile`` table: a user's saved default delivery and
    contact details, linked one-to-one with the configured auth user model.
    """

    # First migration of this app.
    initial = True

    dependencies = [
        # Depend on whichever user model the project has configured.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='UserProfile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Every default is optional: a profile may exist with no saved details.
                ('default_phone_number', models.CharField(blank=True, max_length=20, null=True)),
                ('default_country', django_countries.fields.CountryField(blank=True, max_length=2, null=True)),
                ('default_postcode', models.CharField(blank=True, max_length=20, null=True)),
                ('default_town_or_city', models.CharField(blank=True, max_length=40, null=True)),
                ('default_street_address1', models.CharField(blank=True, max_length=80, null=True)),
                ('default_street_address2', models.CharField(blank=True, max_length=80, null=True)),
                ('default_county', models.CharField(blank=True, max_length=80, null=True)),
                # Deleting the user also deletes the associated profile row.
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"j.mikeemorales@gmail.com"
] | j.mikeemorales@gmail.com |
bb8ae320072d41a28d6b424e78999326d64cdafb | 3028b704ebfcec101577634ae45bd5ed84bdcd2d | /验证码.py | aee2fdf52f874399ca9423210e0a7afa4af02b72 | [] | no_license | ANGLE-AKAMA/VerifyPicture | f73fb482d9fd61b79e523e0faa344b9dd098ffb7 | 81b009c4e4ffc2f4d01281d148fb99d497d4df2b | refs/heads/master | 2020-08-13T19:44:02.128680 | 2019-10-14T11:46:11 | 2019-10-14T11:46:11 | 215,027,517 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,200 | py | import cv2
import numpy as np
import time
import os
import glob
import sys
import reductNoise
import pytesseract
from PIL import Image
from waterDropleCut import WaterCutting
from waterFill import WaterFilled

# Batch pipeline over every captcha image in easy_img/: greyscale, binarise,
# denoise, region-fill, then cut the image into 4 character cells that are
# saved under char/ for later recognition/training.
for file in os.listdir("easy_img") :
    fileName= file.split(".")[0]
    filepath = os.path.join("easy_img",file)
    im = cv2.imread(filepath)
    # Convert the image to greyscale.
    im_gray = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)
    # cv2.imwrite(os.path.join("image",fileName+"_gray.jpg"),im_gray)
    # Binarise the pixels (threshold 240 -> pure black/white).
    ret,im_res = cv2.threshold(im_gray,240,255,cv2.THRESH_BINARY)
    # cv2.imwrite(os.path.join("image",fileName+"_threshold.jpg"),im_res)
    # #用高斯模糊对图片进行降噪
    # kernel = 1/9*np.array([[1,2,1],[2,4,2],[1,2,1]])
    # im_blur = cv2.filter2D(im_inv,-1,kernel)
    # cv2.imwrite(os.path.join("image",fileName+"_filter2d.jpg"),im_inv)
    # #对图片进行进一轮的二值化处理
    # ret,im_res = cv2.threshold(im_blur,150,255,cv2.THRESH_BINARY_INV)
    # cv2.imwrite(os.path.join("image",fileName+"_threshold2.jpg"),im_res)
    print("第二轮二值化处理完毕。。。。")
    # Scan-line based noise reduction (project helper).
    results = reductNoise.reductNoise(im_res)
    # cv2.imwrite(os.path.join("image",fileName+"_reductNoise.jpg"),im_res)
    print("扫描法处理完毕。。。。")
    # Trim blank margins on both sides.
    im_res = reductNoise.arrayTrim(im_res)
    # cv2.imwrite(os.path.join("image",fileName+"_arrayTrim.jpg"),im_res)
    # Flood-fill based region segmentation (project helper).
    waterFill = WaterFilled()
    waterFill.filled(im_res)
    waterFill.splitRegion(8,im_res)
    ret,im_res = cv2.threshold(im_res,100,255,cv2.THRESH_BINARY)
    cv2.imwrite(os.path.join("image",fileName+"_waterFill.jpg"),im_res)
    print("泛水填充法处理完毕。。。。。")
    # Cut the image into 4 equal-width character cells.
    x_len = len(im_res[0])
    y_len = len(im_res)
    cut_step = int(x_len/4)
    cut_points = [cut_step,cut_step*2,cut_step*3,cut_step*4]
    for index,point in enumerate(cut_points) :
        try :
            results = [[0] for i in range(y_len)]
            # Water-droplet cut starting at column `point`; iterating it yields
            # (x, y) pixel coordinates and raises StopIteration when done.
            waterCutt = WaterCutting(point,0,im_res)
            results[0] = [im_res[0][i] for i in range(index*cut_step,point)]
            temp_y = 0
            # NOTE(review): temp_y is never updated inside this loop, so for any
            # y > 0 the row is re-initialised on every visit - confirm whether
            # temp_y was meant to track the last row seen.
            while 1==1 :
                x,y = next(waterCutt)
                # If y moved past temp_y, start a fresh row in `results`.
                if y > temp_y :
                    results[y] = [im_res[y][i] for i in range(index*cut_step,point)]
                else :
                    results[y].append(im_res[y][x])
        except StopIteration :
            # The cut iterator is exhausted: normalise the cell to 60x60,
            # smooth it slightly and write it out.
            # NOTE(review): rows of `results` can have differing lengths, which
            # would make np.asarray produce a ragged object array - verify.
            im_result = np.asarray(results)
            im_result = cv2.resize(im_result, (60, 60))
            kernel = 1/9*np.array([[1,2,1],[2,4,2],[1,2,1]])
            im_result = cv2.filter2D(im_result,-1,kernel)
            # char = pytesseract.image_to_string(im_result,lang="normal",config="--psm 10")
            # print("char===="+char)
            cv2.imwrite(os.path.join("char",fileName+"_waterCutting"+"_"+str(index)+".jpg"),im_result)
            del results
            print("处理完毕了")
    print("水滴切割法处理完毕。。。。。")
| [
"18138802413@163.com"
] | 18138802413@163.com |
48055f2562c59506a8d7d57a62f9919785d4a11c | c96ce403a4f4db5594639d2687bf01af8fe02f2e | /lesson_python_socket_client.py | 79307ea721212d093910a6450632ad7e2917f674 | [] | no_license | jiangyefan/lesson_python | b8aae7a0581fdad747271da044d87471973d51ba | 4aa09e1e5d9fa995567a2f220c698d15711735e8 | refs/heads/master | 2021-01-02T22:47:27.064987 | 2017-08-06T22:59:15 | 2017-08-06T22:59:15 | 99,359,231 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 926 | py | #!/usr/bin/env python
#coding=utf-8
#__author__:Administrator
#__date__:2017/8/5
import socket
# Client side of a simple request/response protocol.  Socket API used below:
#connect
#recv()
#send()
#sendall()
#close()
# Create the client socket object.
sk2=socket.socket()
address=('127.0.0.1',8011)
sk2.connect(address)
# data=sk2.recv(1024) #receive data
# print(str(data,'utf8'))
while True:
    inp=input('>>>')
    if inp=='exit': # typing "exit" leaves the chat loop
        break
    else:
        sk2.send(bytes(inp, 'utf8')) # send the command to the server
        # First the server announces the byte length of the command's result.
        # NOTE(review): this assumes the length arrives alone in the first
        # recv() and that the announced length is exact; if the payload ever
        # overshoots it, the accumulation loop below never terminates.
        result_len=int(str(sk2.recv(1024),'utf8')) # length of the command result
        print(result_len)
        data=bytes() # empty bytes object to accumulate the payload into
        while len(data)!=result_len: # keep reading until all announced bytes arrived
            data += sk2.recv(1024)
        # Decode as GBK - presumably the server pipes Windows console output;
        # confirm against the server implementation.
        print(str(data, 'gbk'))
        # print(str(data, 'utf8'))
# data=sk2.sendall(bytes('123','utf8'))
sk2.close() | [
"200592201@qq.com"
] | 200592201@qq.com |
2c7332530c6106c9f596a55673e138596fa175ad | be7a0aa49a9b4fdad1b8b21c6f1eb6bd508be109 | /ex027vs1.py | 8f5869722797ed74a9a1bd50c65b05a9267c8f63 | [] | no_license | tlima1011/python3-curso-em-video | 29a60ee3355d6cb3ba8d1f48c6a3ecd7bc6e60dd | f6454f4d636a2bf73c151e67710f732e2d8e738c | refs/heads/master | 2021-02-04T01:13:35.313590 | 2020-04-14T12:51:19 | 2020-04-14T12:51:19 | 243,593,899 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 210 | py | nomeCompleto = str(input('Informe seu nome completo.: ')).strip()
# Break the (already stripped) full name into its words, then show the first
# and last names with an initial capital.
palavras = nomeCompleto.split()
primeiro, ultimo = palavras[0], palavras[-1]
print(f'Primeiro nome é {primeiro.capitalize()} e o último é {ultimo.capitalize()}')
| [
"noreply@github.com"
] | noreply@github.com |
caffdc2df4dd79e41559544d02eaf346767be562 | f9258ddebd486a3452679b1524834c63fcdfdb39 | /src/project5.py | da6c8401460dfc1fab1095c713878dba49143e42 | [] | no_license | gracemc1003/Project5 | bfe11357630a969a208a982898486a36325f8603 | d8fa3d113fb4dbc0121c739769f0491e5ad3cb93 | refs/heads/main | 2023-01-01T09:56:14.729995 | 2020-10-12T18:27:02 | 2020-10-12T18:27:02 | 303,476,565 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,917 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Oct  6 12:38:03 2020

@author: gracemcmonagle

Exploratory analysis of the "Historical Product Demand" dataset: aggregate
daily order count and demand for one product, then fit simple exponential
smoothing models with several smoothing levels and plot the forecasts.
"""
#%%
import pandas as pd
import numpy as np  # fix: np.asarray() is used below but numpy was never imported
import matplotlib.pyplot as plt
from datetime import datetime
from sklearn.model_selection import train_test_split
from statsmodels.tsa.holtwinters import SimpleExpSmoothing, Holt

filepath = '/Users/gracemcmonagle/Desktop/School/Fall 2020/EECS 731/Project 5/src/data/Historical Product Demand.csv'
rawData = pd.read_csv(filepath, delimiter = ',')
rawData['Date'] = pd.to_datetime(rawData['Date'], format='%Y/%m/%d')

#%%
# count the different unique products and warehouses, make sure no nan values
noProducts = rawData['Product_Code'].value_counts(dropna=False)
rawData['Warehouse'].value_counts(dropna=False)
rawData['Product_Category'].value_counts(dropna=False)

#%% We find that some of them are listed as (), so we need to fix that
#rawData['Order_Demand'] = rawData['Order_Demand'].astype(str)
#for index, row in rawData.iterrows():
#    row['Order_Demand'] = row['Order_Demand'].replace('(', '').replace(')', '')
#rawData['Order_Demand'] = pd.to_numeric(rawData['Order_Demand'])

#%%
# Work on a single product; .copy() avoids pandas' SettingWithCopyWarning when
# new columns are added to the slice below.
specProd = rawData[rawData['Product_Code'] == 'Product_1341'].copy()
# Bug fix: sort_values() returns a new frame - the result was being discarded.
specProd = specProd.sort_values(by=['Date'])
#plt.hist(specProd['Order_Demand'])
specProd['Order_Demand'] = pd.to_numeric(specProd['Order_Demand'])
#plt.plot(specProd['Date'], specProd['Order_Demand'])
#no of orders on a specific day
specProd['Orders'] = specProd.groupby('Date')['Product_Code'].transform('count')
specProd['Demand'] = specProd.groupby('Date')['Order_Demand'].transform('sum')

#%%
#number of orders by month
noOrdersByDay = pd.DataFrame(specProd.groupby(specProd['Date'].dt.strftime('%Y/%m/%d'))['Orders'].sum())
demandByDay = pd.DataFrame(specProd.groupby(specProd['Date'].dt.strftime('%Y/%m/%d'))['Demand'].max())
noOrdersByDay['Day'] = noOrdersByDay.index
noOrdersByDay['Day'] = pd.to_datetime(noOrdersByDay['Day'], format='%Y/%m/%d')

byDay = pd.concat([noOrdersByDay, demandByDay], axis = 1)
byDay = byDay.sort_values(by=['Day'])
plt.scatter(byDay['Day'], byDay['Orders'])
plt.show()
plt.scatter(byDay['Day'], byDay['Demand'])

# Chronological split: shuffle=False keeps the time ordering intact.
train, test = train_test_split(byDay['Demand'], shuffle=False)
# NOTE(review): the model is fit on the *full* series rather than only
# `train`, so fitted values overlap the test period - confirm intent.
model = SimpleExpSmoothing(np.asarray(byDay['Demand']))
model._index = pd.to_datetime(train.index)

fit1 = model.fit()
# Bug fix: forecast() takes the number of steps ahead, not the test series.
pred1 = fit1.forecast(len(test))
fit2 = model.fit(smoothing_level=.2)
pred2 = fit2.forecast(len(test))
fit3 = model.fit(smoothing_level=.5)
pred3 = fit3.forecast(len(test))

fig, ax = plt.subplots(figsize=(12, 6))
ax.plot(train.index[150:], train.values[150:])
ax.plot(test.index, test.values, color="gray")
for p, f, c in zip((pred1, pred2, pred3),(fit1, fit2, fit3),('#ff7823','#3c763d','c')):
    ax.plot(train.index[150:], f.fittedvalues[150:], color=c)
    ax.plot(test.index, p, label="alpha="+str(f.params['smoothing_level'])[:3], color=c)
plt.title("Simple Exponential Smoothing")
plt.legend();
| [
"noreply@github.com"
] | noreply@github.com |
6433092cbee060d537b5cb9919a76a1ec7c5ab85 | 683b73e0c95c755a08e019529aed3ff1a8eb30f8 | /machina/apps/forum_conversation/forum_attachments/admin.py | de1995638c922ddee9447fdc8ec8937ae0ebd484 | [
"BSD-3-Clause"
] | permissive | DrJackilD/django-machina | b3a7be9da22afd457162e0f5a147a7ed5802ade4 | 76858921f2cd247f3c1faf4dc0d9a85ea99be3e1 | refs/heads/master | 2020-12-26T08:19:09.838794 | 2016-03-11T03:55:25 | 2016-03-11T03:55:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 492 | py | # -*- coding: utf-8 -*-
# Standard library imports
# Third party imports
from django.contrib import admin
# Local application / specific library imports
from machina.core.db.models import get_model
# Resolve the Attachment model dynamically so overridden/swapped models are honoured.
Attachment = get_model('forum_attachments', 'Attachment')


class AttachmentAdmin(admin.ModelAdmin):
    """Admin configuration for forum attachments."""

    # Columns shown on the attachment changelist page.
    list_display = ('id', 'post', 'comment', 'file', )
    # These columns link through to the edit form.
    list_display_links = ('id', 'post', 'comment', )
    # Raw-id widget for the post FK avoids loading every post into a select box.
    raw_id_fields = ('post', )


admin.site.register(Attachment, AttachmentAdmin)
| [
"morgan.aubert@zoho.com"
] | morgan.aubert@zoho.com |
805e86f1d2bd1c867967768dadacd89d4c06a759 | 8823a9b97780bdf5f92c2ef3bfab28ea5a95abc0 | /problems/strings/tests/test_substrings.py | 434a5384b114cf584fe7f903cd441b2004d71c0d | [] | no_license | castlemilk/puzzles | 1ef2f9b7d0707e821869064b78a57691f2ba7a0a | 4cdc757fd14a444abe48254a0ecc8b04ca4b9552 | refs/heads/master | 2022-12-15T05:19:51.818193 | 2018-03-07T10:32:36 | 2018-03-07T10:32:36 | 122,824,563 | 0 | 0 | null | 2022-12-07T23:46:49 | 2018-02-25T10:07:36 | Python | UTF-8 | Python | false | false | 190 | py | from problems.strings import substrings
def test_permutations1():
    """is_permutation: strings whose character counts differ are rejected."""
    # 'aaaaa' repeats a character far more often than 'abcd' contains it.
    assert substrings.is_permutation('abcd','aaaaa') is False
assert substrings.is_permutation('abcd', 'dcab') is True | [
"ben.ebsworth@gmail.com"
] | ben.ebsworth@gmail.com |
6e3dd7d9f9580b03b86f84b139624874d5968242 | 1391fe8dcace0742232d11d96f1eca5da02581ba | /test.py | 465cf5bafb6627ab4907e62fc187ca32fd3dbf60 | [] | no_license | ha8sh/test | a0b879d2cc7b8d753aa7f4bfd75703cfa3b02d51 | af18128bae0ad195f9fb064da0807d67fd65180d | refs/heads/master | 2023-02-26T04:33:11.201116 | 2021-01-25T19:10:07 | 2021-01-25T19:10:07 | 332,850,122 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16 | py | print("hassan")
| [
"root@ibs.ping308.net"
] | root@ibs.ping308.net |
e2ded22e10d7c8fdba1a33dd11b5e8581b59afb5 | 16158ba3a3d94569498d51ab03ac19d7444b0349 | /kg/Recommender-System-master/Recommender_System/algorithm/KGCN/gpu_memory_growth.py | 9a808bd2987cfa7e6fb19015613b64193b9534f9 | [
"MIT"
] | permissive | lulihuang/DL_recommendation | c4109346a93336d85eed7bce2acfea143c76012c | 5e8b1b562cb4e6360103b11b6ec5601cd69282a3 | refs/heads/main | 2023-03-22T12:51:08.395165 | 2021-03-11T03:11:59 | 2021-03-11T03:11:59 | 328,589,292 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | """
import此文件后将gpu设置为显存增量模式
"""
from tensorflow import config

# All GPUs TensorFlow can see (both names are kept so either can be imported).
gpus = physical_devices = config.list_physical_devices('GPU')
if len(gpus) == 0:
    # Runtime message (Chinese): "no GPU detected; memory-growth mode has no effect."
    print('当前没有检测到gpu,设置显存增量模式无效。')
for gpu in gpus:
    try:
        # Allocate GPU memory on demand instead of reserving it all up front.
        config.experimental.set_memory_growth(gpu, True)
    except RuntimeError as e:
        # set_memory_growth raises RuntimeError once a GPU has been initialised;
        # report it and continue with the remaining devices.
        print(e)
| [
"727445285@qq.com"
] | 727445285@qq.com |
be9c106d741c93b8522ff5e49ea7ff2e5f2b74fe | aeeba89591b787bbe6b93ffb4889be9a8fca521e | /cfg.py | cf7791d7a7b0fe3603dac542a0bbc59c1ee3d3aa | [
"MIT"
] | permissive | wistic/python-web-crawler | efa7968f66ecd7396797390f253d0ff68f3623a1 | e3738fd49d77bdff4c43a0ec31ed36cc381d26b8 | refs/heads/master | 2022-12-10T05:38:40.030202 | 2020-08-28T14:24:38 | 2020-08-28T14:24:38 | 288,676,553 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 415 | py | config = {
"root_url": "https://flinkhub.com",
"sleep_timer": 5,
"Max_links_limit": 5000,
"Recrawl_time_limit_hours": 24,
"user_agent": "Python Spiderbot",
"No_of_threads": 5,
"database_name": "python-web-crawler",
"collection_name": "Links",
"connection_uri": "mongodb://localhost:27017/",
"download_dir_path": "/home/wistic/github/python-web-crawler/html-files"
}
| [
"prathameshkatkar11@gmail.com"
] | prathameshkatkar11@gmail.com |
0a38162b769782206e31ce264818a2b2f80a10da | afff172ee00de38de8b7d6663cfbd85d80cf2db9 | /blog/models.py | 7634e140fb8e695428f95cdddf51b62e5434a0d0 | [] | no_license | marmik13/DjangoEcommerce-site | f7af4748ad75de4dce2be9c37e6773bf25a8d900 | 47a7819db4e1be5ea61d646279a5cd47d25da961 | refs/heads/master | 2022-07-26T10:51:37.652455 | 2020-05-24T05:40:09 | 2020-05-24T05:40:09 | 264,853,534 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 686 | py | from django.db import models
# Create your models here.
class Blogpost(models.Model):
    """A blog article: a title, three (heading, content) sections, a
    publication date and a thumbnail image."""

    # Explicit auto-incrementing primary key.
    post_id = models.AutoField(primary_key=True)
    title = models.CharField(max_length=200)
    # Sections 0..2: headN is the section heading, cheadN its body text.
    head0 = models.CharField(max_length=500, default="")
    chead0 = models.CharField(max_length=10000, default="")
    head1 = models.CharField(max_length=500, default="")
    chead1 = models.CharField(max_length=10000, default="")
    head2 = models.CharField(max_length=500, default="")
    chead2 = models.CharField(max_length=10000, default="")
    # No default: the publication date must be supplied explicitly.
    pub_date = models.DateField()
    # Uploaded thumbnails land under MEDIA_ROOT/blog/images.
    thumbnail = models.ImageField(upload_to='blog/images', default="")

    # String shown in the admin and wherever the object is rendered as text.
    def __str__(self):
return self.title | [
"marmikpatelme13398@gmail.com"
] | marmikpatelme13398@gmail.com |
7f9d646ad4abee72b62035b949da452c7e34ddbd | 35f076d1e8b5bbc3871a622b7808246e24b87f95 | /src/github/__init__.py | 10e03497751ba2b6964ca5e7dd22f5646fd3c7f5 | [] | no_license | elena20ruiz/github_crawler | 379ece12e07dcc5c866ca2aa8604d564d45010ff | dad9484539cc371584eb94ecfc20861e342325ee | refs/heads/master | 2020-12-27T18:17:21.001287 | 2020-02-08T17:35:18 | 2020-02-08T17:35:18 | 238,001,792 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 68 | py |
# Canonical GitHub base URL.
GITHUB_URL = 'https://github.com'

# Names exported by ``from github import *``.
__all__ = [
    'GITHUB_URL'
] | [
"elena.ruiz.bdn@gmail.com"
] | elena.ruiz.bdn@gmail.com |
ffa1f1f3a4d948cf970225aaaf6d9dfcbc38315c | d4ce88b6262d30e86f9be2d5635df8544df846e2 | /Web Scrapping/Selenium-Request&Beutifulsoup Detailed/Selenium/selenim_intr.py | 353973c7f248a049a92800b0e4111b37b27abe14 | [] | no_license | ahmetzekiertem/Web-Scrapping | e8d5d3fd2da03f408553c3f6fa1815d79d663af2 | 75a95b8373a84357bdd037cf6fef7461097bfacf | refs/heads/master | 2022-04-19T16:32:28.369369 | 2020-04-19T12:51:42 | 2020-04-19T12:51:42 | 256,998,504 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 394 | py |
from selenium import webdriver # imports
from time import sleep
from bs4 import BeautifulSoup

# make a webdriver object - chrome driver path for my system -- > /Users/waqarjoyia/Downloads/chromedriver
# NOTE(review): the path below points at *geckodriver* (Firefox's driver) while
# a Chrome webdriver is constructed - this likely needs chromedriver instead.
driver = webdriver.Chrome('/Users/mac/Desktop/geckodriver')
# open some page using get method - url -- > parameters
# close webdriver object
driver.close()
| [
"40203533+ahmetzekiertem@users.noreply.github.com"
] | 40203533+ahmetzekiertem@users.noreply.github.com |
d5badcde10dd50e2893aabf8911f0571f3d6e3e6 | 397383b771362fd00f83ef95a783e0578b3d3495 | /firstVideo.py | 6dbb481e3b8e4e5cee5ace6afce80e5d0fc1fd22 | [] | no_license | kshaba01/parrot-ar-drone | d2ee3d2119dd6fc18906ed860de12d5638044d1c | a8c37d7fc1396e2ab265545d27c1431bed166899 | refs/heads/main | 2023-01-01T15:34:45.705102 | 2020-10-26T20:03:19 | 2020-10-26T20:03:19 | 307,161,089 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,807 | py | #########
# firstVideo.py
# This program is part of the online PS-Drone-API-tutorial on www.playsheep.de/drone.
# It shows the general usage of the video-function of a Parrot AR.Drone 2.0 using the PS-Drone-API.
# The drone will stay on the ground.
# Dependencies: a POSIX OS, openCV2, PS-Drone-API 2.0 beta or higher.
# (w) J. Philipp de Graaff, www.playsheep.de, 2014
##########
# LICENCE:
# Artistic License 2.0 as seen on http://opensource.org/licenses/artistic-license-2.0 (retrieved December 2014)
# Visit www.playsheep.de/drone or see the PS-Drone-API-documentation for an abstract from the Artistic License 2.0.
###########
##### Suggested clean drone startup sequence #####
# --- Clean drone startup -----------------------------------------------------
import time, sys
import ps_drone3 as ps_drone

# Connect to the drone, spawn its worker subprocesses and clear any error state.
drone = ps_drone.Drone()
drone.startup()
drone.reset()
# A battery value of -1 means the reset has not finished yet.
while drone.getBattery()[0] == -1:
    time.sleep(0.1)
print ("Battery: "+str(drone.getBattery()[0])+"% "+str(drone.getBattery()[1]))
drone.useDemoMode(True)  # 15 basic datasets per second (the default anyway)

# --- Main program ------------------------------------------------------------
drone.setConfigAllID()  # multi-configuration mode
drone.sdVideo()         # standard resolution (hdVideo() would select HD)
drone.frontCam()        # start with the front camera
config_count = drone.ConfigDataCount
while config_count == drone.ConfigDataCount:
    time.sleep(0.0001)  # wait until the configuration change is acknowledged
drone.startVideo()
drone.showVideo()

# --- Interactive loop --------------------------------------------------------
print ("Use <space> to toggle front- and groundcamera, any other key to stop")
last_frame = drone.VideoImageCount
stop = False
ground = False
while not stop:
    # Block until the next video frame has been decoded.
    while drone.VideoImageCount == last_frame:
        time.sleep(0.01)
    last_frame = drone.VideoImageCount
    key = drone.getKey()
    if key == " ":
        # Space toggles between the front and the ground camera.
        ground = not ground
        drone.groundVideo(ground)
    elif key:
        # Any other key press ends the program.
        stop = True
| [
"kshaba@gmail.com"
] | kshaba@gmail.com |
2edce3beb41e028a8b6e04ee2df6b6902f74f0a5 | feee01a7ba1ccc745060cf8233168934c5ef6426 | /apps/courses/migrations/0009_course_teacher.py | 33de7421a3c85d8ba99c73f05edd594c8b124b27 | [] | no_license | carpenter-zhang/muke | bfa7fa320536466de3f12223b4ae98dc635ceba6 | 7529f4e6be5c1919b86194093e45f574a0ffa89a | refs/heads/master | 2021-08-26T08:15:35.907867 | 2017-11-22T13:15:06 | 2017-11-22T13:15:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 633 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-11-08 16:51
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Adds an optional ``teacher`` foreign key to the ``Course`` model."""

    dependencies = [
        ('organizations', '0006_teacher_image'),
        ('courses', '0008_video_video_times'),
    ]

    operations = [
        migrations.AddField(
            model_name='course',
            name='teacher',
            # Nullable/blank so existing courses remain valid without a teacher;
            # deleting a teacher cascades to the linked course rows.
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='organizations.Teacher', verbose_name='教师'),
        ),
    ]
| [
"596864319@qq.com"
] | 596864319@qq.com |
2eb73db4561ba05eee120641d58dcfdf2e0c885b | 333632d57adb95541042769fed09b9698996f0b2 | /fullstack/venv_linux_auth0/bin/rst2xml.py | 483278b51727939b4ecb68f5f5a86856858bfb7c | [] | no_license | NginxNinja/Udacity_FullStack | c71f0ef323d794767c59b7316f845744ec9d9773 | ec028682633e4979a5938956ff92d336270a57f3 | refs/heads/master | 2021-05-19T15:12:05.720426 | 2020-05-24T00:31:33 | 2020-05-24T00:31:33 | 251,841,704 | 0 | 0 | null | 2020-04-18T06:18:28 | 2020-03-31T22:55:39 | Python | UTF-8 | Python | false | false | 622 | py | #!/vagrant/venv_linux_auth0/bin/python
# $Id: rst2xml.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing Docutils XML.
"""
try:
    import locale
    locale.setlocale(locale.LC_ALL, '')
except Exception:
    # Locale setup is best-effort: silently fall back to the default "C"
    # locale.  The previous bare ``except:`` also swallowed SystemExit and
    # KeyboardInterrupt, which should never be suppressed here.
    pass

from docutils.core import publish_cmdline, default_description

# Command-line description shown by --help.
description = ('Generates Docutils-native XML from standalone '
               'reStructuredText sources. ' + default_description)

# Read reST from argv/stdin and write Docutils XML.
publish_cmdline(writer_name='xml', description=description)
| [
"dummy.kenmen2@gmail.com"
] | dummy.kenmen2@gmail.com |
a2ebd1a75d40d5243131985f9e179274ccf490e4 | e3efa07f9d804b7cb65ff79034f5565953f369df | /day12.py | c7105577cb87ead3b6059b6f7b882ff7dfdab838 | [] | no_license | fursovia/adventofcode2020 | decb277b7eedbbf5229e8c2d80dfe74bede4d234 | 8d65174fc6908736cc5609ea3115eda9d9586f63 | refs/heads/main | 2023-01-30T21:35:17.927959 | 2020-12-13T17:33:55 | 2020-12-13T17:33:55 | 317,532,322 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,974 | py | from dataclasses import dataclass
from typing import List, Tuple
from collections import OrderedDict
RAW = """F10
N3
F7
R90
F11"""
CARDINAL_MAPPING = OrderedDict(
{
"N": (0, +1),
"E": (-1, 0),
"S": (0, -1),
"W": (+1, 0),
}
)
DIRECTIONS = list(CARDINAL_MAPPING.values())
@dataclass
class Navigation:
    """Ship state for Advent of Code 2020 day 12 (both rule sets).

    Positions and directions are (x, y) tuples in the axis convention of
    CARDINAL_MAPPING; since distances are taken as absolute values the
    convention does not affect the results.
    """

    # Instruction list, e.g. ["F10", "N3", ...]: one letter plus an integer.
    actions: List[str]
    # Part 1 state: heading (starts facing East, i.e. CARDINAL_MAPPING["E"])
    # and the ship's position.
    current_direction: Tuple[int, int] = (-1, 0)
    current_position: Tuple[int, int] = (0, 0)
    # Part 2 state: waypoint offset relative to the ship (10 East, 1 North).
    waypoint: Tuple[int, int] = (-10, 1)

    def move(self) -> None:
        """Part 1 rules: N/S/E/W move the ship, L/R turn it, F moves forward."""
        for action in self.actions:
            value = int(action[1:])
            if action.startswith("R") or action.startswith("L"):
                self.change_direction(action)
                continue
            elif action.startswith("F"):
                direction = self.current_direction
            else:
                # Any other letter is a compass heading.
                direction = CARDINAL_MAPPING[action[0]]
            to_step = tuple(di * value for di in direction)
            self.current_position = tuple(map(sum, zip(self.current_position, to_step)))

    def move2(self) -> None:
        """Part 2 rules: F moves the ship toward the waypoint `value` times;
        every other instruction manipulates the waypoint instead."""
        for action in self.actions:
            value = int(action[1:])
            if action.startswith("F"):
                to_step = tuple(di * value for di in self.waypoint)
                self.current_position = tuple(map(sum, zip(self.current_position, to_step)))
            else:
                self.change_waypoint_position(action)

    def change_waypoint_position(self, action: str) -> None:
        """Rotate the waypoint around the ship (R/L, multiples of 90°) or
        translate it along a compass heading."""
        value = int(action[1:])
        if action.startswith("R"):
            # One 90° step per iteration: (x, y) -> (-y, x) in this axis system.
            for _ in range(value // 90):
                self.waypoint = -self.waypoint[1], self.waypoint[0]
        elif action.startswith("L"):
            # Inverse rotation: (x, y) -> (y, -x).
            for _ in range(value // 90):
                self.waypoint = self.waypoint[1], -self.waypoint[0]
        else:
            direction = CARDINAL_MAPPING[action[0]]
            to_step = tuple(di * value for di in direction)
            self.waypoint = tuple(map(sum, zip(self.waypoint, to_step)))

    def change_direction(self, action: str) -> None:
        """Turn the ship left or right by a multiple of 90 degrees."""
        turn, degrees = action[0], int(action[1:])
        assert turn == "L" or turn == "R"
        # DIRECTIONS is ordered N, E, S, W (clockwise): turning right walks
        # forward through the list, turning left walks backwards.
        num_rotations = int(degrees / 90)
        if turn == "L":
            num_rotations = -num_rotations
        current_index = DIRECTIONS.index(self.current_direction)
        self.current_direction = DIRECTIONS[(current_index + num_rotations) % 4]

    def get_distance(self) -> int:
        """Manhattan distance of the ship from its starting point."""
        return abs(self.current_position[0]) + abs(self.current_position[1])
# Sanity checks against the worked example (expected answers 25 and 286).
navigation = Navigation(RAW.split("\n"))
navigation.move()
assert navigation.get_distance() == 25
navigation = Navigation(RAW.split("\n"))
navigation.move2()
assert navigation.get_distance() == 286

# Real puzzle input: print part 1 and part 2 answers.
with open("data/day12.txt") as f:
    data = f.read().split("\n")

navigation = Navigation(data)
navigation.move()
distance = navigation.get_distance()
print(distance)

navigation = Navigation(data)
navigation.move2()
distance = navigation.get_distance()
print(distance)
| [
"fursov.ia@gmail.com"
] | fursov.ia@gmail.com |
138b1b5eaed3c0e2c160f52a49e9ed9f1a6fd14b | 65331b368b8956c4864e95e21e6b9c2b3c715a2f | /__init__.py | 951540ce32097ac761cc9accb9ab3842ef23d029 | [] | no_license | yamoimeda/diccionario-guna | 83358ded73d79282457ff21984cdbff3028e1a82 | e3041d22721cab9e97e9ebd45d3456d9960aecfe | refs/heads/master | 2021-06-27T08:56:02.247787 | 2021-02-11T23:27:30 | 2021-02-11T23:27:30 | 203,426,052 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 994 | py | from flask import Flask,redirect,url_for,render_template,request,jsonify
from pymongo import MongoClient
import sqlite3 as sql

# Flask application object; the routes below attach to it.
app = Flask(__name__)
# Leftover from an earlier MongoDB-backed version; lookups now use SQLite.
#cliente = MongoClient("localhost", 27017)
#db = cliente.diccionarios
@app.route('/',methods=['GET','POST'])
def index():
    # Landing page: the search form lives in templates/inicios.html.
    return render_template('inicios.html')
@app.route('/resultado',methods=['GET','POST'])
def resultados():
    """Look up the Guna word posted in the form and return its meaning.

    Returns the last matching ``significado`` from the ``palabras`` table, or
    the literal 'no hay coincidencia' when there is no match.
    """
    # NOTE(review): GET requests fall through and return None (HTTP 500);
    # presumably the form always POSTs here - confirm before tightening.
    if request.method == 'POST':
        palabra = request.form['palabra']
        conn = sql.connect("diccionarioguna.db")
        try:
            conn.row_factory = sql.Row
            cur = conn.cursor()
            # Parameterised query: safe against SQL injection.
            cur.execute('''SELECT significado FROM palabras where palabra = ?''',(palabra,))
            signifi = cur.fetchall()
        finally:
            # Bug fix: the connection was never closed, leaking one file
            # handle per request.
            conn.close()
        if len(signifi) == 0:
            significado = 'no hay coincidencia'
        else:
            for ab in signifi:
                significado = ab['significado']
        return significado
if __name__ == '__main__':
    # Run Flask's built-in development server when executed directly.
    app.run()
| [
"noreply@github.com"
] | noreply@github.com |
450652c8cc5a188c7692244ac62b7cc5f3cd3443 | 23be24e92420af66307121468136570234aceebd | /Problems/What day is it/task.py | 49c6dff7b35a4222d7bf355ce9d79d44fb131b7e | [] | no_license | sanqit/credit_calculator | c309013f6639e886ae2a257b6c37c401d2d7b1fa | 1b2aaab271b985ec2df47fe6409a08e4dd180a0f | refs/heads/master | 2022-11-15T05:30:49.412246 | 2020-07-04T20:50:04 | 2020-07-04T20:50:04 | 277,182,017 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 146 | py | offset = int(input())
# The clock starts at 10:30 (10.5 hours) on Tuesday; adding the offset can
# push the moment back into Monday (negative total) or forward into
# Wednesday (past 24 hours).
time = 10.5
total = time + offset
if total < 0:
    day = "Monday"
elif total > 24:
    day = "Wednesday"
else:
    day = "Tuesday"
print(day)
| [
"admin@sanqit.ru"
] | admin@sanqit.ru |
0e13ea228a661ee0d8e2c5bfce784e4d705a8f66 | 09b0075f56455d1b54d8bf3e60ca3535b8083bdc | /WideResnet.py | 595e4f69f1baa13a9f27f80fdb61e54773195de4 | [] | no_license | YanYan0716/MPL | e02c1ddf036d6019c3596fd932c51c3a14187f5e | 6ad82b050ec1ed81987c779df2dddff95dc1cde5 | refs/heads/master | 2023-04-17T23:05:54.164840 | 2021-05-07T01:14:49 | 2021-05-07T01:14:49 | 331,491,485 | 11 | 6 | null | null | null | null | UTF-8 | Python | false | false | 7,157 | py | import os
from abc import ABC
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras import layers
from tensorflow.keras import regularizers
import config
class BasicBlock(layers.Layer):
    """Residual block: (BatchNorm -> LeakyReLU -> Conv3x3) twice, with a
    1x1 projection shortcut whenever the stride or the channel count
    changes.  Building unit of the WideResnet model below."""

    def __init__(self, in_channels, out_channels, stride, dropout, name, trainable):
        super(BasicBlock, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.stride = stride
        # Dropout *rate* for now; the attribute is re-bound to a Dropout
        # layer object further down, after the rate has been consumed.
        self.dropout = dropout
        # name = name
        # NOTE(review): this writes the Keras ``Layer.trainable`` property -
        # confirm that freezing via this constructor flag behaves as intended.
        self.trainable = trainable
        self.bn1 = layers.BatchNormalization(
            # momentum=0.999,
            epsilon=config.BATCH_NORM_EPSILON,
            trainable=self.trainable,
            name=name+'_bn1'
        )
        self.relu1 = layers.LeakyReLU(alpha=0.2)
        # First 3x3 convolution carries the (possibly strided) downsampling.
        self.conv1 = layers.Conv2D(
            filters=self.out_channels,
            kernel_size=3,
            strides=self.stride,
            padding='same',
            use_bias=False,
            kernel_initializer=keras.initializers.HeNormal(),
            kernel_regularizer=regularizers.l2(config.WEIGHT_DECAY),
            trainable=self.trainable,
            name=name+'_conv1',
        )
        self.bn2 = layers.BatchNormalization(
            # momentum=0.999,
            epsilon=config.BATCH_NORM_EPSILON,
            trainable=self.trainable,
            name=name+'_bn2'
        )
        self.relu2 = layers.LeakyReLU(alpha=0.2)
        # NOTE(review): this Dropout layer is constructed but never applied in
        # call() below - dead sub-layer unless a subclass uses it.
        self.dropout = layers.Dropout(
            rate=self.dropout,
            trainable=self.trainable,
            name=name+'_dropout',
        )
        # Second 3x3 convolution always keeps stride 1.
        self.conv2 = layers.Conv2D(
            filters=self.out_channels,
            kernel_size=3,
            strides=1,
            padding='same',
            use_bias=False,
            kernel_initializer=keras.initializers.HeNormal(),
            kernel_regularizer=regularizers.l2(config.WEIGHT_DECAY),
            trainable=self.trainable,
            name=name+'_conv2',
        )
        if self.stride != 1 or self.in_channels != self.out_channels:
            # Input/output shapes differ: project the residual with an
            # activated 1x1 convolution.
            self.short_cut_relu = layers.LeakyReLU(alpha=0.2)
            self.short_cut = layers.Conv2D(
                filters=self.out_channels,
                kernel_size=1,
                strides=self.stride,
                padding='same',
                use_bias=False,
                kernel_initializer=keras.initializers.HeNormal(),
                kernel_regularizer=regularizers.l2(config.WEIGHT_DECAY),
                trainable=self.trainable,
                name=name+'_shortcut'
            )
        self.add = layers.Add(name=name+'_add')

    def call(self, inputs, **kwargs):
        residual = inputs
        out = self.bn1(inputs)
        if self.stride != 1 or self.in_channels != self.out_channels:
            # When a projection is needed the residual branches off *after*
            # bn1, sharing that normalisation with the main path.
            residual = out
        out = self.relu1(out)
        out = self.conv1(out)
        out = self.bn2(out)
        out = self.relu2(out)
        out = self.conv2(out)
        if self.stride != 1 or self.in_channels != self.out_channels:
            residual = self.short_cut_relu(residual)
            residual = self.short_cut(residual)
        # else:
        #     shortcut = out
        out = self.add([residual, out])
        return out
class WideResnet(keras.Model):
def __init__(self, k=[16, 32, 64, 128], name='wider'):
super(WideResnet, self).__init__(name=name)
self.k = k
self.dropout = config.DROPOUT
self.drop = layers.Dropout(
rate=config.DROPOUT,
trainable=self.trainable,
name=name+'_dropout',
)
self.conv1 = layers.Conv2D(
filters=k[0],
kernel_size=3,
strides=1,
padding='same',
use_bias=False,
kernel_initializer=keras.initializers.HeNormal(),
kernel_regularizer=regularizers.l2(config.WEIGHT_DECAY),
trainable=self.trainable,
name=name + '_conv1',
)
self.Basic1 = BasicBlock(in_channels=k[0], out_channels=k[1], stride=1, dropout=self.dropout, name=name+'_Basic1', trainable=True)
self.Basic2 = BasicBlock(in_channels=k[1], out_channels=k[1], stride=1, dropout=self.dropout, name=name+'_Basic2', trainable=True)
self.Basic3 = BasicBlock(in_channels=k[1], out_channels=k[1], stride=1, dropout=self.dropout, name=name+'_Basic3', trainable=True)
self.Basic4 = BasicBlock(in_channels=k[1], out_channels=k[1], stride=1, dropout=self.dropout, name=name+'_Basic4', trainable=True)
self.Basic5 = BasicBlock(in_channels=k[1], out_channels=k[2], stride=2, dropout=self.dropout, name=name+'_Basic5', trainable=True)
self.Basic6 = BasicBlock(in_channels=k[2], out_channels=k[2], stride=1, dropout=self.dropout, name=name+'_Basic6', trainable=True)
self.Basic7 = BasicBlock(in_channels=k[2], out_channels=k[2], stride=1, dropout=self.dropout, name=name+'_Basic7', trainable=True)
self.Basic8 = BasicBlock(in_channels=k[2], out_channels=k[2], stride=1, dropout=self.dropout, name=name+'_Basic8', trainable=True)
self.Basic9 = BasicBlock(in_channels=k[2], out_channels=k[3], stride=2, dropout=self.dropout, name=name+'_Basic9', trainable=True)
self.Basic10 = BasicBlock(in_channels=k[3], out_channels=k[3], stride=1, dropout=self.dropout, name=name+'_Basic10', trainable=True)
self.Basic11 = BasicBlock(in_channels=k[3], out_channels=k[3], stride=1, dropout=self.dropout, name=name+'_Basic11', trainable=True)
self.Basic12 = BasicBlock(in_channels=k[3], out_channels=k[3], stride=1, dropout=self.dropout, name=name+'_Basic12', trainable=True)
self.bn1 = layers.BatchNormalization(
# momentum=0.999,
epsilon=config.BATCH_NORM_EPSILON,
trainable=self.trainable,
name=name+'_bn1'
)
self.relu1 = layers.LeakyReLU(alpha=0.2)
self.avgpool = layers.GlobalAveragePooling2D(name=name+'_avgpool')
self.dense = layers.Dense(
units=config.NUM_CLASS,
# kernel_initializer=keras.initializers.RandomNormal(mean=0., stddev=1.),
# activation='softmax',
kernel_regularizer=regularizers.l2(config.WEIGHT_DECAY),
name=name+'_dense',
)
def call(self, inputs, training=None, mask=None):
x = self.conv1(inputs)
x = self.Basic1(x)
x = self.Basic2(x)
x = self.Basic3(x)
x = self.Basic4(x)
x = self.Basic5(x)
x = self.Basic6(x)
x = self.Basic7(x)
x = self.Basic8(x)
x = self.Basic9(x)
x = self.Basic10(x)
x = self.Basic11(x)
x = self.Basic12(x)
x = self.bn1(x)
x = self.relu1(x)
x = self.avgpool(x)
x = self.drop(x)
out = self.dense(x)
return out
def model(self):
    """Wrap this subclassed network into a functional keras.Model.

    Builds a fixed 32x32x3 float32 input and traces call() so that
    utilities such as model.summary() can display the layer graph.
    """
    inp = keras.Input(shape=(32, 32, 3), dtype=tf.float32)
    out = self.call(inp)
    return keras.Model(inputs=inp, outputs=out)
if __name__ == '__main__':
    # Smoke test: build the network on a random CIFAR-sized batch and
    # print its layer summary.
    img = tf.random.normal([1, 32, 32, 3])
    model = WideResnet().model()
    model.summary()
"yanqian0716@gmail.com"
] | yanqian0716@gmail.com |
d7df85c05060a2aec8d54ca9acc857b79150ca15 | ba466ae34666eb71584cc3a808bdd78f5f555d2f | exercicios/ex37.py | 2ccae9fb556d61448789994befc172790a367837 | [] | no_license | gilmargjs/cursoEmVideoPython | 8fa30984c00abd3ca331aab3de3d8e017d451e52 | cb15343bf5c884b2045e6a35b949cd02c3e8321c | refs/heads/main | 2023-06-12T22:48:59.398215 | 2021-06-30T20:52:55 | 2021-06-30T20:52:55 | 381,829,438 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 796 | py | """programa que leia um número inteiro qualquer e peça
para o usuario escolher qual será a base da conversão:
-1 para binario
-2 para octal
-3 para hexadecimal
"""
# Base-conversion menu: read an integer and print it in the base the user
# picks (1 = binary, 2 = octal, 3 = hexadecimal).
print('='*30)
print('SISTEMA DE CONVERÇÃO DE BASES')
print('='*30)
num = int(input("Digite um número: "))
print('''Escolha uma das Bases para Conversão:
[ 1 ] Converter para BINÁRIO
[ 2 ] Converter para OCTAL
[ 3 ] Converter para HEXADECIMAL''')
print('='*30)
opcao = int(input("Sua Opção: "))
# BUG FIX: format(num, 'b'/'o'/'x') instead of bin()/oct()/hex() + [2:].
# Slicing two characters off the repr breaks for negative input
# (bin(-10)[2:] == 'b1010'); format() handles the sign correctly.
if opcao == 1:
    print(f'{num} convertido para BINÁRIO é igual a {format(num, "b")}')
elif opcao == 2:
    print(f'{num} convertido para OCTAL é igual a {format(num, "o")}')
elif opcao == 3:
    print(f'{num} convertido para HEXADECIMAL é igual a {format(num, "x")}')
else:
    print('escolha uma opção valida')
print('='*30)
"gilmarjose2014@gmail.com"
] | gilmarjose2014@gmail.com |
b5d716b2740e66732492a580f7db8280232f261e | d3d8acc788bd3a8d7e5f861ad87c4d802723062b | /test/step3_descope200MCHF_HLT.py | c2272355f19530f27df01562b14bf70d1dee3ae4 | [] | no_license | calabria/L1IntegratedMuonTrigger | 27ff0bde46208f84595423ec375080979fbe4c62 | 05a368b8d04f84b675d40445555f2cacfd135e4e | refs/heads/master | 2021-01-24T21:57:42.232290 | 2015-08-11T11:52:35 | 2015-08-11T11:52:35 | 38,485,204 | 0 | 2 | null | 2015-08-11T11:52:35 | 2015-07-03T09:40:57 | Python | UTF-8 | Python | false | false | 4,607 | py | # Auto generated configuration file
# using:
# Revision: 1.20
# Source: /local/reps/CMSSW/CMSSW/Configuration/Applications/python/ConfigBuilder.py,v
# with command line options: step3_descope200MCHF --fileout file:out_hlt_descope200MCHF.root --mc --eventcontent RECOSIM --step HLT --customise RecoParticleFlow/PandoraTranslator/customizeHGCalPandora_cff.cust_2023HGCalPandoraMuon,Configuration/DataProcessing/Utils.addMonitoring,L1Trigger/L1IntegratedMuonTrigger/phase2DescopingScenarios.descope200MCHF --datatier GEN-SIM-RECO --conditions PH2_1K_FB_V6::All --magField 38T_PostLS1 --filein file:/afs/cern.ch/work/d/dildick/public/GEM/MuonPhaseIIScopeDoc/CMSSW_6_2_0_SLHC26_patch3/src/001B71CC-0F38-E511-BEE2-002618943918.root --geometry Extended2023HGCalMuon,Extended2023HGCalMuonReco --no_exec -n 10
import FWCore.ParameterSet.Config as cms

# The cmsRun process: re-runs the HLT step for the descoped (200 MCHF)
# Phase-2 detector scenario. File is auto-generated by cmsDriver; keep
# the statement order intact.
process = cms.Process('HLT')

# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('SimGeneral.MixingModule.mixNoPU_cfi')
process.load('Configuration.Geometry.GeometryExtended2023HGCalMuonReco_cff')
process.load('Configuration.StandardSequences.MagneticField_38T_PostLS1_cff')
process.load('HLTrigger.Configuration.HLT_GRun_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')

# Process at most 10 events (cmsDriver default from "-n 10").
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(10)
)

# Input source
process.source = cms.Source("PoolSource",
    secondaryFileNames = cms.untracked.vstring(),
    fileNames = cms.untracked.vstring('file:/afs/cern.ch/work/d/dildick/public/GEM/MuonPhaseIIScopeDoc/CMSSW_6_2_0_SLHC26_patch3/src/001B71CC-0F38-E511-BEE2-002618943918.root')
)

process.options = cms.untracked.PSet(

)

# Production Info
process.configurationMetadata = cms.untracked.PSet(
    version = cms.untracked.string('$Revision: 1.20 $'),
    annotation = cms.untracked.string('step3_descope200MCHF nevts:10'),
    name = cms.untracked.string('Applications')
)

# Output definition
process.RECOSIMoutput = cms.OutputModule("PoolOutputModule",
    splitLevel = cms.untracked.int32(0),
    eventAutoFlushCompressedSize = cms.untracked.int32(5242880),
    outputCommands = process.RECOSIMEventContent.outputCommands,
    fileName = cms.untracked.string('file:out_hlt_descope200MCHF.root'),
    dataset = cms.untracked.PSet(
        filterName = cms.untracked.string(''),
        dataTier = cms.untracked.string('GEN-SIM-RECO')
    )
)

# Additional output definition

# Other statements
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, 'PH2_1K_FB_V6::All', '')

# Path and EndPath definitions
process.endjob_step = cms.EndPath(process.endOfProcess)
process.RECOSIMoutput_step = cms.EndPath(process.RECOSIMoutput)

# Schedule definition: all HLT paths first, then end-of-job and output.
process.schedule = cms.Schedule()
process.schedule.extend(process.HLTSchedule)
process.schedule.extend([process.endjob_step,process.RECOSIMoutput_step])

# customisation of the process.

# Automatic addition of the customisation function from RecoParticleFlow.PandoraTranslator.customizeHGCalPandora_cff
from RecoParticleFlow.PandoraTranslator.customizeHGCalPandora_cff import cust_2023HGCalPandoraMuon

#call to customisation function cust_2023HGCalPandoraMuon imported from RecoParticleFlow.PandoraTranslator.customizeHGCalPandora_cff
process = cust_2023HGCalPandoraMuon(process)

# Automatic addition of the customisation function from HLTrigger.Configuration.customizeHLTforMC
from HLTrigger.Configuration.customizeHLTforMC import customizeHLTforMC

#call to customisation function customizeHLTforMC imported from HLTrigger.Configuration.customizeHLTforMC
process = customizeHLTforMC(process)

# Automatic addition of the customisation function from Configuration.DataProcessing.Utils
from Configuration.DataProcessing.Utils import addMonitoring

#call to customisation function addMonitoring imported from Configuration.DataProcessing.Utils
process = addMonitoring(process)

# Automatic addition of the customisation function from L1Trigger.L1IntegratedMuonTrigger.phase2DescopingScenarios
from L1Trigger.L1IntegratedMuonTrigger.phase2DescopingScenarios import descope200MCHF

#call to customisation function descope200MCHF imported from L1Trigger.L1IntegratedMuonTrigger.phase2DescopingScenarios
process = descope200MCHF(process)

# End of customisation functions
"sven.dildick@cern.ch"
] | sven.dildick@cern.ch |
b09fdc0bc43f30b2b51c8893afcf2024ef86d619 | 0009c76a25c89a0d61d3bc9e10071da58bdfaa5a | /py/ztools/mtp/mtp_tools.py | 0496f5ae683026478bdcc98faf9cc9c89b3e14a9 | [
"MIT"
] | permissive | julesontheroad/NSC_BUILDER | 84054e70a80b572088b0806a47ceb398302451b5 | e9083e83383281bdd9e167d3141163dcc56b6710 | refs/heads/master | 2023-07-05T05:23:17.114363 | 2021-11-15T19:34:47 | 2021-11-15T19:34:47 | 149,040,416 | 1,249 | 143 | MIT | 2022-12-15T03:19:33 | 2018-09-16T22:18:01 | Python | UTF-8 | Python | false | false | 8,313 | py | import os
from listmanager import folder_to_list
from listmanager import parsetags
from pathlib import Path
import Print
import shutil
from mtp.wpd import is_switch_connected
import sys
import subprocess
from python_pick import pick
from python_pick import Picker
# ---------------------------------------------------------------------------
# Module-level path discovery: locate the NSC_BUILDER root, the ztools /
# zconfig folders and the squirrel entry point, whether this file runs from
# the repository root or from inside ztools.
# ---------------------------------------------------------------------------
squirrel_dir=os.path.abspath(os.curdir)
NSCB_dir=os.path.abspath('../'+(os.curdir))
if os.path.exists(os.path.join(squirrel_dir,'ztools')):
    # Running from the repository root.
    NSCB_dir=squirrel_dir
    zconfig_dir=os.path.join(NSCB_dir, 'zconfig')
    ztools_dir=os.path.join(NSCB_dir,'ztools')
    squirrel_dir=ztools_dir
elif os.path.exists(os.path.join(NSCB_dir,'ztools')):
    # Running from inside ztools; parent folder is the repository root.
    squirrel_dir=squirrel_dir
    ztools_dir=os.path.join(NSCB_dir, 'ztools')
    zconfig_dir=os.path.join(NSCB_dir, 'zconfig')
else:
    # Fallback: assume the standard layout relative to the parent folder.
    ztools_dir=os.path.join(NSCB_dir, 'ztools')
    zconfig_dir=os.path.join(NSCB_dir, 'zconfig')
# Prefer the Python entry point; fall back to the frozen executable.
testroute1=os.path.join(squirrel_dir, "squirrel.py")
testroute2=os.path.join(squirrel_dir, "squirrel.exe")
urlconfig=os.path.join(zconfig_dir,'NUT_DB_URL.txt')
isExe=False
if os.path.exists(testroute1):
    squirrel=testroute1
    isExe=False
elif os.path.exists(testroute2):
    squirrel=testroute2
    isExe=True
# MTP helper binary and the local cache files it reads/writes.
bin_folder=os.path.join(ztools_dir, 'bin')
nscb_mtp=os.path.join(bin_folder, 'nscb_mtp.exe')
cachefolder=os.path.join(ztools_dir, '_mtp_cache_')
if not os.path.exists(cachefolder):
    os.makedirs(cachefolder)
games_installed_cache=os.path.join(cachefolder, 'games_installed.txt')
autoloader_files_cache=os.path.join(cachefolder, 'autoloader_files.txt')
sd_xci_cache=os.path.join(cachefolder, 'sd_xci.txt')
valid_saves_cache=os.path.join(cachefolder, 'valid_saves.txt')
mtp_source_lib=os.path.join(zconfig_dir,'mtp_source_libraries.txt')
mtp_internal_lib=os.path.join(zconfig_dir,'mtp_SD_libraries.txt')
storage_info=os.path.join(cachefolder, 'storage.csv')
download_lib_file = os.path.join(zconfig_dir, 'mtp_download_libraries.txt')
# Local staging area for SX OS autoloader stub files ('hdd' / 'sd' subdirs).
sx_autoloader_db=os.path.join(zconfig_dir, 'sx_autoloader_db')
def gen_sx_autoloader_files_menu():
    """Interactive front-end for gen_sx_autoloader_files().

    Asks for a source drive/folder and the generation options (target
    storage, push-after-generation, collision cleanup), then delegates
    to gen_sx_autoloader_files().
    """
    print('***********************************************')
    print('SX AUTOLOADER GENERATE FILES FROM HDD OR FOLDER')
    print('***********************************************')
    print('')
    folder = input("Input a drive path: ")
    if not os.path.exists(folder):
        sys.exit("Can't find location")
    # Target storage decides which cache sub-folder the stubs land in.
    selected = pick(['HDD', 'SD'], 'Target for autoloader files: ',
                    min_selection_count=1)
    type = 'hdd' if selected[0] == 'HDD' else 'sd'
    selected = pick(['YES', 'NO'], 'Push files after generation?: ',
                    min_selection_count=1)
    push = selected[0] == 'YES'
    selected = pick(['YES', 'NO'], "Ensure files can't colide after transfer?: ",
                    min_selection_count=1)
    no_colide = selected[0] == 'YES'
    gen_sx_autoloader_files(folder, type=type, push=push, no_colide=no_colide)
def gen_sx_autoloader_files(folder, type='hdd', push=False, no_colide=False):
    """Generate SX OS autoloader stub files for every XCI under *folder*.

    Each stub is named after the parsed title id and contains the
    console-side path ("usbhdd:/..." or "sdmc:/...") of the game file.

    folder    -- source drive/folder scanned for .xci/.xc0 files
    type      -- 'hdd' or 'sd'; selects the cache sub-folder and path prefix
    push      -- when True, transfer the generated stubs to the console via MTP
    no_colide -- when True, run cleanup_sx_autoloader_files() afterwards
    """
    gamelist = folder_to_list(folder, ['xci', 'xc0'])
    if type == 'hdd':
        SD_folder = os.path.join(sx_autoloader_db, 'hdd')
    else:
        SD_folder = os.path.join(sx_autoloader_db, 'sd')
    if not os.path.exists(sx_autoloader_db):
        os.makedirs(sx_autoloader_db)
    if not os.path.exists(SD_folder):
        os.makedirs(SD_folder)
    # Start from an empty staging folder so stale stubs never survive.
    for f in os.listdir(SD_folder):
        fp = os.path.join(SD_folder, f)
        try:
            shutil.rmtree(fp)
        except OSError:
            os.remove(fp)
    print(' * Generating autoloader files')
    try:
        for g in gamelist:
            try:
                fileid, fileversion, cctag, nG, nU, nD, baseid = parsetags(g)
                if fileid == 'unknown':
                    continue
                tfile = os.path.join(SD_folder, fileid)
                fileparts = Path(g).parts
                # Rewrite the local drive root into the console-side mount
                # point; forward slashes are what SX OS expects.
                if type == 'hdd':
                    new_path = g.replace(fileparts[0], '"usbhdd:/')
                else:
                    new_path = g.replace(fileparts[0], '"sdmc:/')
                new_path = new_path.replace('\\', '/')
                with open(tfile, 'w') as text_file:
                    text_file.write(new_path)
            except Exception:
                # Best effort: skip entries whose tags cannot be parsed.
                pass
        print(' DONE')
        if push:
            if not is_switch_connected():
                sys.exit("Can't push files. Switch device isn't connected.\nCheck if mtp responder is running!!!")
            print(' * Pushing autoloader files')
            if type == 'hdd':
                destiny = "1: External SD Card\\sxos\\titles\\00FF0012656180FF\\cach\\hdd"
            else:
                destiny = "1: External SD Card\\sxos\\titles\\00FF0012656180FF\\cach\\sd"
            process = subprocess.Popen([nscb_mtp, "TransferFolder", "-ori", SD_folder, "-dst", destiny, "-fbf", "true"])
            # wait() replaces the original busy poll() spin: same end state,
            # no CPU burn while the transfer runs.
            process.wait()
        if no_colide:
            cleanup_sx_autoloader_files()
    except Exception as e:
        # BUG FIX: catch Exception, not BaseException. The BaseException
        # handler also swallowed the SystemExit raised by sys.exit() above,
        # printing it as an error and letting the function carry on after a
        # failed push.
        Print.error('Exception: ' + str(e))
        pass
def cleanup_sx_autoloader_files():
    """Remove device-side autoloader stubs that point at the wrong storage.

    Refreshes the local caches over MTP, then deletes every autoloader
    entry that lives under 'sd' without a matching XCI on the SD card, or
    under 'hdd' while the same title id *is* present on the SD card — so
    a title never resolves from two places at once.
    """
    from mtp_game_manager import retrieve_xci_paths
    from mtp_game_manager import get_gamelist
    # Best-effort wipe of the local cache folder before refreshing it.
    try:
        for f in os.listdir(cachefolder):
            fp = os.path.join(cachefolder, f)
            try:
                shutil.rmtree(fp)
            except OSError:
                os.remove(fp)
    except Exception:
        pass
    if not is_switch_connected():
        sys.exit("Can't push files. Switch device isn't connected.\nCheck if mtp responder is running!!!")
    retrieve_xci_paths()
    print(" * Retriving autoloader files in device. Please Wait...")
    process = subprocess.Popen([nscb_mtp, "Retrieve_autoloader_files", "-tfile", autoloader_files_cache, "-show", "false"],
                               stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # wait() replaces the original busy poll() spin; same end state.
    process.wait()
    if os.path.exists(autoloader_files_cache):
        print(" Success")
    else:
        sys.exit("Autoloader files weren't retrieved properly")
    gamelist = get_gamelist(file=sd_xci_cache)
    autoloader_list = get_gamelist(file=autoloader_files_cache)
    sd_xci_ids = []
    for g in gamelist:
        try:
            fileid, fileversion, cctag, nG, nU, nD, baseid = parsetags(g)
            sd_xci_ids.append(fileid)
        except Exception:
            pass
    files_to_remove = []
    for f in autoloader_list:
        fileparts = Path(f).parts
        # BUG FIX: the original tested for 'sdd', a folder name that never
        # occurs (stubs live under 'sd' or 'hdd' — see gen/push functions),
        # so SD-side stubs were never considered for removal.
        if 'sd' in fileparts and not (fileparts[-1] in sd_xci_ids):
            files_to_remove.append(f)
        elif 'hdd' in fileparts and (fileparts[-1] in sd_xci_ids):
            files_to_remove.append(f)
    print(" * The following files will be removed")
    for f in files_to_remove:
        print(" - " + f)
    for f in files_to_remove:
        process = subprocess.Popen([nscb_mtp, "DeleteFile", "-fp", f])
        process.wait()
def push_sx_autoloader_libraries():
    """Transfer the locally generated autoloader stubs (both the 'hdd' and
    'sd' sets) to the console's SX OS cache folders over MTP, optionally
    running the collision cleanup afterwards."""
    if not is_switch_connected():
        sys.exit("Can't push files. Switch device isn't connected.\nCheck if mtp responder is running!!!")
    selected = pick(['YES', 'NO'], "Ensure files can't colide after transfer?: ",
                    min_selection_count=1)
    no_colide = selected[0] == 'YES'

    def _transfer(src, dst):
        # Same spin-until-finished wait the rest of the module uses.
        proc = subprocess.Popen([nscb_mtp, "TransferFolder", "-ori", src, "-dst", dst, "-fbf", "true"])
        while proc.poll() == None:
            if proc.poll() != None:
                proc.terminate()

    print(' * Pushing autoloader files in hdd folder')
    _transfer(os.path.join(sx_autoloader_db, 'hdd'),
              "1: External SD Card\\sxos\\titles\\00FF0012656180FF\\cach\\hdd")
    print(' * Pushing autoloader files in SD folder')
    _transfer(os.path.join(sx_autoloader_db, 'sd'),
              "1: External SD Card\\sxos\\titles\\00FF0012656180FF\\cach\\sd")
    if no_colide:
        cleanup_sx_autoloader_files()
def get_nca_ticket(filepath, nca):
    """Look up the ticket matching *nca* inside an XCI/XCZ or NSP/NSZ file.

    Returns a (check, titleKey) pair: check is True when a .tik entry
    verifies the key for the given nca; titleKey holds the 16-byte title
    key block of the last ticket inspected (0 if none was seen).
    Files with any other extension fall through and return None.
    """
    import Fs
    from binascii import hexlify as hx, unhexlify as uhx
    check = False
    titleKey = 0
    if filepath.endswith('xci') or filepath.endswith('xcz'):
        container = Fs.Xci(filepath)
        for partition in container.hfs0:
            # Tickets live in the secure partition of a gamecard image.
            if str(partition._path) != "secure":
                continue
            for entry in partition:
                if not (entry._path).endswith('.tik'):
                    continue
                titleKey = entry.getTitleKeyBlock().to_bytes(16, byteorder='big')
                check = container.verify_key(nca, str(entry._path))
                if check == True:
                    break
        return check, titleKey
    elif filepath.endswith('nsp') or filepath.endswith('nsz'):
        container = Fs.Nsp(filepath)
        for entry in container:
            if not (entry._path).endswith('.tik'):
                continue
            titleKey = entry.getTitleKeyBlock().to_bytes(16, byteorder='big')
            check = container.verify_key(nca, str(entry._path))
            if check == True:
                break
        return check, titleKey
"42461174+julesontheroad@users.noreply.github.com"
] | 42461174+julesontheroad@users.noreply.github.com |
6537118072122509e9adad7738eee5616a1b24dd | fc83fc10fcc509316e612d73bd40a81d3ca0a2e6 | /tests/nd_gaussian_multiprocessing.py | 1f8c698393e3a088d991eb3484785a391dc3c783 | [
"MIT"
] | permissive | DimitriMisiak/mcmc-red | 47dfb7e0664205da55fa463df77851722082e3c3 | caae0ce39d082e578176a5078a9184980b0851c3 | refs/heads/main | 2023-06-19T04:10:42.385862 | 2019-07-05T07:45:01 | 2019-07-05T07:45:01 | 387,757,149 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,928 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Handy MCMC scripts.
Test for the different fit method (mcmc, ptmcmc, minimizer).
Author:
Dimitri Misiak (misiak@ipnl.in2p3.fr)
"""
import numpy as np
import matplotlib.pyplot as plt
import sys
import scipy.signal as sgl
from os import path
import scipy.optimize as op
import mcmc_red as mcr
# close all plots
plt.close('all')

# Toy problem: draw an ndim-dimensional Gaussian "blob" whose mean we will
# recover with the MCMC sampler below.
nsample = 1000
ndim = 4
SCALE = 'log'
### LINEAR SCALE
if SCALE == 'linear':
    mu = np.random.uniform(-10, 10, ndim)
    sigma = np.random.uniform(0, 10, ndim)
    bounds = ((-20, 20),) * ndim
### LOG SCALE
elif SCALE == 'log':
    # Means drawn log-uniformly in [1e-6, 1]; sigma fixed at 10% of mu.
    mu_generator = np.random.uniform(-6, 0, ndim)
    mu = 10**mu_generator
    sigma = mu/10
    bounds = ((1e-7, 1e1),) * ndim
else:
    raise Exception('SCALE not set properly!')
print("Generating blob at mu={0} and sigma={1}".format(mu, sigma))
blob = np.random.normal(mu, sigma, (nsample, ndim))
# Sanity check: empirical moments should be close to mu and sigma.
print("Checking")
print("mean =", np.mean(blob, axis=0))
print("std =", np.std(blob, axis=0))
def chi2(param):
    """Chi-square distance between *param* and every blob sample,
    weighted by the known per-dimension sigma."""
    value = mcr.chi2_simple(blob, param, sigma)
    return value
#def chi2(param):
# x2 = np.sum( (blob - np.array(param))**2 / np.array(sigma)**2 )
# return x2
condi = None
# XXX MCMC
# save directory for the emcee sampler output
sampler_path = 'mcmc_sampler/autosave'
# Split the (inf, sup) bound pairs into two parallel numpy arrays so the
# likelihood can test all dimensions with one elementwise comparison.
bounds = list(bounds)
binf = np.array([b[0] for b in bounds])
bsup = np.array([b[1] for b in bounds])
# additional constraint as a function of the parameters; default accepts all.
# 'is None' (identity test) instead of '== None' — PEP 8 / pycodestyle E711.
if condi is None:
    condi = lambda p: True
# Loglikelihood function taking into accounts the bounds
def loglike(x):
    """Loglikelihood being -chi2/2.

    Parameters outside [binf, bsup] or rejected by the extra constraint
    `condi` get -inf so the walkers never wander out of bounds.
    """
    below = np.sum(x < binf)
    above = np.sum(x > bsup)
    if below != 0 or above != 0 or condi(x) != True:
        return -np.inf
    return -0.5 * chi2(x)
# running the mcmc analysis (2 worker threads, 1000 steps; progress is
# auto-saved under sampler_path)
sampler = mcr.mcmc_sampler_multi(loglike, bounds, nsteps=1000, path=sampler_path, threads=2, scale=SCALE)
#nwalkers=None
#nsteps=10000
#threads=4
##############################################################################
## extracts the sup bounds and the inf bounds
#bounds = list(bounds)
#binf = list()
#bsup = list()
#for b in bounds:
# inf, sup = b
# binf.append(inf)
# bsup.append(sup)
#binf = np.array(binf)
#bsup = np.array(bsup)
#
#condi = None
## additionnal constrain as function of the parameters
#if condi == None:
# condi = lambda p: True
#
## Loglikelihood function taking into accounts the bounds
#def loglike(x):
# """ Loglikelihood being -chi2/2.
# Take into account the bounds.
# """
# cinf = np.sum(x<binf)
# csup = np.sum(x>bsup)
# if cinf == 0 and csup == 0 and condi(x) == True:
## return -0.5*aux(np.power(10,x))
# return -0.5*chi2(x)
#    else:
# return -np.inf
#
## number of parameters/dimensions
#ndim = len(bounds)
#
## default nwalkers
#if nwalkers == None:
# nwalkers = 10 * ndim
#
## walkers are uniformly spread in the parameter space
#pos = list()
#for n in xrange(nwalkers):
# accept = False
# while not accept:
# new_pos = [
# np.random.uniform(low=l, high=h) for l,h in zip(binf, bsup)
# ]
# accept = condi(new_pos)
# pos.append(new_pos)
#
## MCMC analysis
#sampler = emcee.EnsembleSampler(nwalkers, ndim, loglike, threads=threads)
#sampler.run_mcmc(pos, nsteps, rstate0=np.random.get_state())
#############################################################################
# # loading the mcmc results
logd, chain, lnprob, acc = mcr.get_mcmc_sampler(sampler_path)
# LaTeX axis labels for the corner plot, one per dimension.
lab = tuple(['$\mu${}'.format(i) for i in range(ndim)])
dim = int(logd['dim'])
# Best-fit parameters with lower/upper credible-interval edges.
xopt, inf, sup = mcr.mcmc_results(dim, chain, lnprob, acc, lab,
                                  scale=SCALE, savedir=sampler_path)
print(xopt, inf, sup)
| [
"dimitrimisiak@gmail.com"
] | dimitrimisiak@gmail.com |
151d04d5fcd1d701ef91676c6174b1bebb1baf8c | 208db76b669686f782d59ec393efdb11e3d3cbbc | /test.l0.py | 0f6d3d40bd459a45e68347a3ddc466043ba27139 | [] | no_license | vsevolod-oparin/stream-dynamic-components | 149542d1dd754cd3f9a7e7bfdff118612e3fb21a | ef5e40cb07f568205a140b78a77067f32d4f34cd | refs/heads/master | 2021-01-10T01:32:21.968060 | 2016-02-09T16:00:41 | 2016-02-09T16:00:41 | 51,305,791 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,774 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest
import random
from algo.l0sample import Rec1 as Rec1
from algo.l0sample import RecS as RecS
from algo.l0sample import RecGeneral as RecGeneral
class TestRec1(unittest.TestCase):
    """Unit tests for the 1-sparse recovery sketch Rec1."""

    def test_zero(self):
        """An untouched recoverer holds nothing recoverable."""
        rec = Rec1(100, 0.001)
        self.assertFalse(rec.correct())

    def test_one(self):
        """A single update is recovered exactly."""
        rec = Rec1(100, 0.001)
        rec.update(17, 42)
        self.assertTrue(rec.correct())
        self.assertEqual(rec.recover(), (17, 42))

    def test_two(self):
        """Two live entries cannot be represented by a 1-sparse sketch."""
        rec = Rec1(100, 0.001)
        rec.update(1, 1)
        rec.update(2, 2)
        self.assertFalse(rec.correct())

    def test_many(self):
        """Updates that fully cancel leave only the surviving entry."""
        rec = Rec1(100, 0.001)
        for idx in xrange(100):
            rec.update(idx, idx + 17)
        rec.update(17, 42)
        for idx in xrange(100):
            rec.update(idx, -(idx + 17))
        self.assertTrue(rec.correct())
        self.assertEqual(rec.recover(), (17, 42))
class TestRecS(unittest.TestCase):
    """Unit tests for the s-sparse recovery sketch RecS."""

    def _apply_cancelling_noise(self, rec):
        """Apply 100 random updates, each immediately undone."""
        for _ in xrange(100):
            ind, val = random.randint(0, 99), random.randint(1, 25)
            rec.update(ind, val)
            rec.update(ind, -val)

    def test_touched(self):
        rec = RecS(100, 5, 0.01)
        self.assertFalse(rec.touched())

    def test_zero(self):
        rec = RecS(100, 5, 0.01)
        self.assertEqual(rec.recover(), dict())

    def test_one(self):
        """Cancelled noise must not disturb the single live entry."""
        rec = RecS(100, 5, 0.01)
        rec.update(17, 42)
        self._apply_cancelling_noise(rec)
        self.assertTrue(rec.touched())
        self.assertEqual(rec.recover(), {17: 42})

    def test_five(self):
        """Five live entries (the sketch capacity) are all recovered."""
        rec = RecS(100, 5, 0.01)
        updates = dict([(17 + i, 42 + i) for i in xrange(5)])
        for key in updates.keys():
            rec.update(key, updates[key])
        self._apply_cancelling_noise(rec)
        self.assertEqual(rec.recover(), updates)
class TestRecGeneral(unittest.TestCase):
    """Unit tests for the general l0-sampler RecGeneral."""

    def test_zero(self):
        """An empty sketch samples the sentinel pair (0, 0)."""
        sampler = RecGeneral(100, 0.01)
        self.assertEqual(sampler.sample(), (0, 0))

    def test_many(self):
        """After cancelling every odd index, each sampler must return an
        even index together with its true accumulated value."""
        size = 20
        samplers = [RecGeneral(size, 0.01) for _ in xrange(10)]
        vals = [0 for _ in xrange(size)]
        # Random updates, mirrored into the reference array `vals`.
        for _ in xrange(size):
            ind, val = random.randint(0, size - 1), random.randint(-5, 5)
            vals[ind] += val
            for sampler in samplers:
                sampler.update(ind, val)
        # Zero out every odd position so only even indices stay live.
        for ind in xrange(size):
            if ind % 2 == 1 and vals[ind] != 0:
                for sampler in samplers:
                    sampler.update(ind, -vals[ind])
                vals[ind] -= vals[ind]
        answers = [sampler.sample() for sampler in samplers]
        print(vals)
        print(answers)
        for ind, val in answers:
            self.assertTrue(ind % 2 == 0 and val == vals[ind])
if __name__ == '__main__':
    # Discover and run all the test cases above.
    unittest.main()
"oparin.vsevolod@gmail.com"
] | oparin.vsevolod@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.