text stringlengths 38 1.54M |
|---|
from collections import namedtuple
import json
# ScrapedData = namedtuple('ScrapedData', 'title code employment_2016 employment_2026 change_number change_percent anual_wage')
# s = ScrapedData(title='Management Occupations', code='11-0000', employment_2016='9,533.1', employment_2026='10,411.5',
# change_number='878.3', change_percent='9.4', anual_wage='100,790')
class Occupation(object):
    """One occupation row scraped from BLS projection tables, renderable as JSON."""

    def __init__(self, data):
        # data: indexable sequence in scraped-column order
        self.title = data[0]
        self.code = data[1]
        self.employment_2016 = data[2]
        self.employment_2026 = data[3]
        self.change_number = data[4]
        self.change_percent = data[5]
        self.anual_wage = data[6]  # (sic) original attribute name kept for callers

    def jsonData(self):
        """Return the occupation as a nested dict keyed by its title."""
        json_data = {
            self.title: [
                {'code': self.code},
                {'employment': [
                    {'employment_2016': self.employment_2016},
                    # BUGFIX: this entry previously reused the 'employment_2016'
                    # key while holding the 2026 value.
                    {'employment_2026': self.employment_2026}
                ]},
                {'change_employment': [
                    {'change_number': self.change_number},
                    {'change_percentage': self.change_percent}
                ]},
                {'anual_wage': self.anual_wage}
            ]
        }
        return json_data
# Fallback serializer so json.dumps can handle our class instances.
def jsonDefault(object):
    """Serialize an arbitrary object via its attribute dictionary."""
    return vars(object)
# # class instance
# employee = Occupation(s)
# # write it in json file
# filename = "careers.json"
# f = open(filename, "w")
# jsonstuff = json.dumps(employee.jsonData(), indent=4, default=jsonDefault)
# f.write(jsonstuff) |
"""
Created on 12:08, May. 23rd, 2021
Author: fassial
Filename: test_inputs.py
"""
# local dep
import stimulus
__all__ = [
"test_poisson_input",
]
## define test func
# define test_poisson_input func
def test_poisson_input():
    """Smoke-test stimulus.inputs.poisson_input and display the stimulus shape."""
    result = stimulus.inputs.poisson_input(duration=100)
    stim = result[0]
    # expected shape: (duration / dt, size)
    print(stim.shape)
|
#!/usr/bin/python3
"""Island_perimeter in python"""
def island_perimeter(grid):
    """Return the perimeter of the island described in grid.

    grid is a list of lists of ints where 0 is water and 1 is land.
    BUGFIX: the previous formula 2 * (longest_run + nonempty_rows) is only
    correct for solid rectangles; for L-shapes or concave islands it is
    wrong.  Count 4 edges per land cell and subtract 2 for every shared
    edge with the neighbour to the right or below.
    """
    perimeter = 0
    for i, row in enumerate(grid):
        for j, cell in enumerate(row):
            if cell != 1:
                continue
            perimeter += 4
            # shared edge with the cell to the right removes one side each
            if j + 1 < len(row) and row[j + 1] == 1:
                perimeter -= 2
            # shared edge with the cell below removes one side each
            if i + 1 < len(grid) and grid[i + 1][j] == 1:
                perimeter -= 2
    return perimeter
|
from multiprocessing import Process, cpu_count, freeze_support, Queue
import numpy as np
from math import ceil, floor
import time
def next(seed):
    """Advance the 48-bit LCG once; return (signed 31-bit output, new seed).

    Mirrors java.util.Random.next(31): multiply/add modulo 2**48, take the
    top 31 bits, and reinterpret bit 31 as a sign bit.
    """
    seed = (seed * 0x5DEECE66D + 0xB) % (1 << 48)
    out = seed >> 17  # keep the high 31 of 48 bits
    if out & (1 << 31):  # sign extension (a 31-bit value never hits this)
        out -= 1 << 32
    return out, seed
def nextInt(n, seed):
    """Port of java.util.Random(seed).nextInt(n): a bounded draw in [0, n).

    Only the drawn value is returned; the advanced internal seed is
    discarded, so repeated calls with the same seed repeat.
    """
    # Java's setSeed scramble: XOR with the LCG multiplier, mask to 48 bits.
    seed = (seed ^ 0x5deece66d) & ((1 << 48) - 1)
    retval, seed = next(seed)
    if not (n & (n - 1)):
        # n is a power of two: multiply/shift keeps the distribution exact.
        return (n * retval) >> 31
    else:
        bits = retval
        val = bits % n
        # Java's rejection loop; with Python's unbounded ints and a
        # non-negative 31-bit `bits` this condition never goes negative,
        # so in practice no retry ever happens here.
        while (bits - val + n - 1) < 0:
            bits, seed = next(seed)
            val = bits % n
        return val
def javaInt64(val):
    """Reduce val modulo 2**64 into Java's signed long range [-2**63, 2**63)."""
    wrapped = val % (1 << 64)
    return wrapped - (1 << 64) if wrapped >= (1 << 63) else wrapped

def javaInt32(val):
    """Reduce val modulo 2**32 into Java's signed int range [-2**31, 2**31)."""
    wrapped = val % (1 << 32)
    return wrapped - (1 << 32) if wrapped >= (1 << 31) else wrapped
def itsASlime(cx, cz, worldseed):
    """Return True when chunk (cx, cz) is a slime chunk for worldseed.

    Reproduces Minecraft's Java seed mix: the chunk-coordinate terms are
    wrapped to 32-bit ints, summed into the world seed, XORed with
    987234911 (Python's ^ binds looser than +, matching Java's precedence),
    and the chunk is a slime chunk when Random(seed).nextInt(10) == 0.
    NOTE(review): the third term multiplies *after* the 32-bit wrap
    (javaInt32(cz*cz) * 4392871), presumably mirroring the int->long
    widening in the Java original — confirm against a reference.
    """
    seed= javaInt64(worldseed + javaInt32(cx * cx * 4987142) + javaInt32(cx * 5947611) + javaInt32(cz * cz) * 4392871 + javaInt32(cz * 389711) ^ 987234911)
    return not nextInt(10, seed)
def initialize(r, s, w, offset):
    """Build the initial s x s boolean window of slime-chunk flags.

    Rows correspond to chunk-z values starting at `offset`, columns to
    chunk-x values starting at -r, for world seed w.
    """
    window = np.zeros((s, s), dtype=bool)
    for row in range(s):
        for col in range(s):
            window[row][col] = itsASlime(-r + col, row + offset, w)
    return window
def goDown(a, nbr, s, x, z, w):
    """Slide the window down by nbr rows: drop the top rows, compute fresh ones."""
    kept = a[nbr:]
    fresh = np.zeros((nbr, s), dtype=bool)
    for di in range(nbr):
        for dj in range(s):
            fresh[di][dj] = itsASlime(x + dj, z + s + di, w)
    return np.concatenate((kept, fresh))
def goRight(a, nbr, s, x, z, w):
    """Slide the window right by nbr columns, mutating and returning `a`.

    Each row drops its leftmost flag and appends the flag for the next
    column to the right (x + s + step for each successive shift).
    """
    for row in range(s):
        for step in range(nbr):
            new_flag = itsASlime(x + s + step, z + row, w)
            a[row] = np.concatenate((a[row][1:], [new_flag]))
    return a
def checkMask(mask, layer):
    """True when the candidate window exactly equals the target mask."""
    return np.array_equal(mask, layer)
def workers(mask, index, offset, seed, size, radius, cores, result):
    """Worker process: scan one chunk row (z = offset*cores + index) for mask matches.

    Builds the leftmost size x size window at x = -radius, then slides it
    right one column at a time across the search span, pushing (x, z)
    window positions that exactly match `mask` onto the shared queue.
    """
    block = initialize(radius, size, seed, offset * cores + index)
    if checkMask(mask, block):
        result.put((0, offset * cores + index))
    for i in range(-radius, radius - 1):
        block = goRight(block, 1, size, i, offset * cores + index, seed)
        if checkMask(mask, block):
            # NOTE(review): reported x is i + 1 while the initial hit reports
            # 0 rather than -radius — confirm the coordinate convention.
            result.put((i + 1, offset * cores + index))
def main(radius, seed, size, mask):
    """Search chunk rows in parallel for size x size windows matching `mask`.

    For each band of rows, spawns one worker per CPU core, waits for all of
    them, drains the shared result queue, prints periodic progress, and
    finally prints every (x, z) match found.
    """
    # BUGFIX: `assert size, radius > 0` treated `radius > 0` as the assert
    # *message*, so radius was never validated at all.
    assert size > 0 and radius > 0
    result = []
    result_queue = Queue()
    cores = cpu_count()
    t = time.time()
    for offset in range(-floor(radius / cores), ceil(radius / cores)):
        # fresh pool per band so we only join this band's processes
        processPool = []
        for i in range(cores):
            p = Process(target=workers,
                        args=(mask, i, offset, seed, size, radius, cores, result_queue))
            p.daemon = True
            p.start()
            processPool.append(p)
        # BUGFIX: the original loop joined `p` (only the last-created
        # process) on every iteration, never waiting on the others.
        for el in processPool:
            el.join()
        # sentinel lets us drain exactly this band's results
        result_queue.put("DONE")
        while True:
            temp = result_queue.get()
            if temp == "DONE":
                break
            result.append(temp)
        if not offset % cores:
            print("{} %".format(round(offset / (2 * radius / cores) * 100 + 50, 2)))
            print(time.time() - t)
            t = time.time()
    print(result)
def start():
    """Entry point: configure the slime-chunk search and report total runtime."""
    began = time.time()
    freeze_support()
    size, seed, radius = 16, 2, 20000
    # all-False mask: look for a 16 x 16 region with no slime chunks at all
    mask = np.zeros((size, size), dtype=bool)
    main(radius, seed, size, mask)
    print(time.time() - began)
    print("The results are in chunks compared to 0 0, also you need to read it as chunkX,chunkZ")
if __name__ == '__main__':
    # freeze_support() must run first so frozen Windows builds can respawn workers
    freeze_support()
    start()
|
from EMAIL import *
from fra import *
from time import gmtime, strftime
import sys
import logging
import time
FILENAME = "MaxaromaNA"
Subject = "MaxAroma_NewArrival"
def main():
    """Poll the MaxAroma homepage every 15 minutes and email when it changes.

    Relies on getMaxAroma / sendemail / writeToFile from the star imports
    (fra / EMAIL modules); presumably getMaxAroma returns the page content
    as a string — confirm in fra.
    """
    url = "http://www.maxaroma.com/"
    original = getMaxAroma(url)
    # dumpToSave(original)
    sendemail("Started " + FILENAME,Subject)
    while True:
        # timestamp used for both the log line and error reports
        current_time = strftime("%Y-%m-%d %H:%M:%S", gmtime())
        # check
        try:
            current =getMaxAroma(url)
            log = ""
            if current == original:
                # if unchanged, just record the poll
                log = current_time + " same"
            # if different then write email alert together with the content
            else:
                log = current_time + "\n" + "OLD: \n" + original + "\nNEW: \n" + current
                sendemail(log,Subject)
                original = current
            writeToFile(log, FILENAME)
        except:
            # NOTE(review): bare except also swallows KeyboardInterrupt; the
            # traceback is still captured via logging.exception
            logging.exception('OPPSS' + current_time)
        time.sleep(15 * 60)
# Log errors (with tracebacks) to a per-scraper file, then start polling forever.
logging.basicConfig(level=logging.DEBUG, filename=FILENAME + 'error.txt')
main()
|
import subprocess
def executeCommand():
    '''
    Read a command from a user input, validate it, and execute it

    NOTE(review): stub — no implementation yet.  When implemented, prefer
    subprocess.run([...], shell=False) with a tokenized argument list so
    user input is never handed to a shell.
    '''
from astral.adapters.Net import TCPServer,errors
from base_adapter import ClientAdapter
import socket
import threading
import time
class server_adapter(TCPServer):
    """TCP server transport that queues incoming messages for a game server.

    Network threads append (address, send_callable, data) tuples to
    self.messages; the game thread drains them via update(), keeping game
    logic off the socket threads.
    """
    def __init__(self,gameserver,host="127.0.0.1",port=1919):
        TCPServer.__init__(self)
        self.gameserver = gameserver
        self.host = host
        self.port = port
        # pending (address, send_callable, data) tuples awaiting dispatch
        self.messages = []
        self.addr = None
        # placeholder sender (no-op) until a client socket is known
        self.Send = lambda data:0
    def sendback(self,data):
        # reply on the socket most recently heard from
        self.send_data(data)
    def send_to_socket(self,sock,data):
        # remember the target so send_data routes to this client
        self.sending_socket = sock
        self.send_data(data)
    def remove_socket(self,sock):
        """Remove a socket, tolerating it already having been removed."""
        try:
            TCPServer.remove_socket(self,sock)
        except ValueError:
            import traceback
            traceback.print_exc()
    def input_func(self,sock,host,port,address):
        """This occurs for each socket we hear from right before handle_data is called"""
        self.addr = address
        self.Send = lambda data:self.send_to_socket(sock,data)
    def handle_data(self,data,addr=None,send=None):
        """This occurs on incoming data, right after input_func is called, but only if data is clean"""
        if not addr:
            addr = self.addr
        if not send:
            send = self.Send
        # NOTE(review): queues self.addr/self.Send rather than the addr/send
        # locals normalized just above — confirm that is intentional.
        self.messages.append((self.addr,self.Send,data))
    def client_disconnect_func(self,sock,host,port,address):
        """Client disconnected"""
        self.messages.append((address,lambda data:0,{"action":"disconnected"}))
    def update(self):
        #The threads should already be listening
        # iterate a snapshot so handlers that append more messages are safe
        for a in self.messages[:]:
            self.gameserver.handle_data(*a)
            self.messages.remove(a)
    def _start(self):
        """Blocking server loop; run on the background thread by start()."""
        self.ending = False
        try:
            self.connect(self.host,self.port)
        except:
            print("can't host")
            self.handle_data({"action":"error","value":"hosting_error"})
            self.ending = True
            return
        self.serve_forever()
        self.quit()
        self.ending = True
    def start(self):
        """Launch the server loop on a daemon thread."""
        t = threading.Thread(target=self._start)
        t.daemon = True
        t.start()
        self.t =t
    def close(self):
        """Shut down, close all known sockets, and wait for the loop to end."""
        try:
            self.quit()
        except:
            pass
        self.looping = False
        # NOTE(review): sending_socket only exists after send_to_socket has
        # run at least once — closing before any send would AttributeError.
        if self.sending_socket:
            self.sending_socket.close()
        if self.unconnected_socket:
            self.unconnected_socket.close()
        if getattr(self,"connected_sockets",None):
            for sock in self.connected_sockets:
                sock.close()
        #self.socketaddresses = {}
        # busy-wait until _start flips self.ending
        while not self.ending:
            pass
from astral.adapters.Net import TCPClient,errors
class client_adapter(TCPClient,ClientAdapter):
    """TCP client transport bridging a game client to a server_adapter peer."""
    def __init__(self,gameclient,host="127.0.0.1",port=1919):
        TCPClient.__init__(self)
        ClientAdapter.__init__(self,gameclient,host,port)
        self.connect_to_server()
        try:
            self.connect(host,port)
        except:
            # treat any connect failure as an immediate disconnect event
            self.handle_disconnect()
            return
        self.handle_connect()
    def send_to_server(self,data):
        # best-effort send; errors are silently dropped
        try:
            self.send_data(data)
        except:
            return
    def listen(self):
        """Poll for incoming data and dispatch it; a socket error disconnects."""
        if not self.connect_state:
            return
        try:
            data = self.check_for_data()
        except errors.SocketError:
            self.handle_disconnect()
            return
        if data:
            self.handle_data(data)
        self.flush()
    def close(self):
        self.quit()
adapter_hook = {("server","tcp"):server_adapter,("client","tcp"):client_adapter} |
# importing dataset
# BUGFIX: the original import line was a syntax error
# ("from sklearn.datasets import .... as fetch_olivetti_faces") and the
# script used np / plt without ever importing them.
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces

faces = fetch_olivetti_faces()
img = faces.images
M = 400   # number of face images in the dataset
N = 4096  # pixels per image (64 x 64)

# visage moyen = vm (mean face): accumulate every flattened image, then average
vm = np.zeros(N)
for a in img:
    vm = vm + a.flatten()
vm1 = vm / M
vm2 = vm1.reshape(64, 64)
# BUGFIX: plt.imshow takes `cmap`, not `amap`
plt.imshow(vm2, cmap='gray')
|
# Build an n x n magic square for odd n with a siamese-style construction:
# place 1 in the middle of the last row, then follow the diagonal for each
# successive number, stepping the base back when the target cell is taken.
n=13
L=[ [ 0 for i in range(n) ] for j in range(n) ]
# start position: last row, middle column
k,l=n-1,int((n+1)/2-1)
if (n%2!=0):
    L[k][l]=1
for i in range (1,n**2):
    if (L[(k+i)%n][(l+i)%n] == 0):
        # diagonal cell (relative to base k, l) is free: place the next number
        L[(k+i)%n][(l+i)%n] = i+1
    else:
        # occupied: shift two rows up instead and rebase (k, l) there
        L[(k+i-2)%n][(l+i-1)%n]=i+1
        k,l=(k+i-2)%n,(l+i-1)%n
for i in range(n):
    print (L[i],'\n')
from django.contrib import admin
from online_app.models import *
# Register your models here.
# Expose the Repo and Package models in the Django admin site.
admin.site.register(Repo)
admin.site.register(Package)
#!/usr/bin/python3
import xmlrpc.client
import time
import sys
# Require the target server host as the only command-line argument.
if len(sys.argv) == 1:
    # BUGFIX: usage errors belong on stderr and should exit non-zero so
    # shell scripts can detect the failure (was stdout + exit code 0).
    print("USAGE: %s <server>" % sys.argv[0], file=sys.stderr)
    sys.exit(1)
s = xmlrpc.client.ServerProxy('http://%s:8000' % sys.argv[1])
# Time a single round-trip ping() call.
pre = time.time()
response = s.ping()
post = time.time()
diff = post - pre
print(pre,response,post,diff)
# Print list of available methods
#print(s.system.listMethods())
#!/usr/bin/env python3
import yaml
from jinja2 import Template
from datetime import datetime
from kubernetes import client, config
def main():
    """Kick off a Kaniko build of the init-sidecar image from its git repo."""
    kaniko("sidecar", "latest", "git://github.com/kbase/init-sidecar.git")
def kaniko(image, tag, repo):
    """Render the Kaniko build-pod template and create the pod in namespace 'next'.

    image: base name for the build pod (a timestamp suffix makes it unique);
    tag: image tag to build; repo: git context URL passed to the template.
    """
    # Set image name, and consistent timestamp
    build_image_name = image
    timestamp = datetime.now().strftime('%Y%m%d%H%M%S%f')
    # Set variables to be set in the template
    data = {
        "image_name": build_image_name + "-" + timestamp,
        "image_tag": tag,
        "repo_name": repo,
    }
    # Render YAML from Jinja template
    with open('kaniko-template.j2') as file_:
        j2_template = Template(file_.read())
        build_yaml = j2_template.render(data)
    # Begin K8s deployment (uses the local kubeconfig for credentials)
    config.load_kube_config()
    dep = yaml.safe_load(build_yaml)
    # NOTE(review): variable is named k8s_apps_v1 but this is CoreV1Api
    # (pods are a core resource) — the name is merely misleading.
    k8s_apps_v1 = client.CoreV1Api()
    resp = k8s_apps_v1.create_namespaced_pod(
        body=dep, namespace="next")
    print("Deployment created. status='%s'" % resp.metadata.name)
if __name__ == '__main__':
main()
|
import os
import csv
import time
import imaplib
import email
from flask import Flask, request, session, g, redirect, url_for, abort, \
render_template, flash, jsonify
app = Flask(__name__)
app.config.from_object(__name__)
# default data directory name, overridable via QVTABLE_SETTINGS
app.config.update(dict(DATADIR="data"))
app.config.from_envvar('QVTABLE_SETTINGS', silent=True)
# absolute path where the served CSV data files live
datafullpath = os.path.join(app.root_path, app.config.get("DATADIR"))
def email_check():
    '''
    Check email if there are any new messages

    NOTE(review): the early return below disables the whole function — the
    IMAP logic is dead code, presumably parked until real credentials are
    configured (they are currently hardcoded placeholders; move to config).
    '''
    return
    print("Checking email...")
    mail = imaplib.IMAP4_SSL("imap.gmail.com")
    (retcode, capabilities) = mail.login('example@gmail.com','pass')
    mail.select(readonly=True)
    (retcode, messages) = mail.search(None, '(UNSEEN)')
    if retcode == 'OK':
        for num in messages[0].split():
            print('Processing')
            typ, data = mail.fetch(num,'(RFC822)')
            for response_part in data:
                if isinstance(response_part, tuple):
                    print(response_part[1])
                    original = email.message_from_string(str(response_part[1]))
                    print (original['From'])
                    print (original['Subject'])
                    # mark the message as read
                    typ, data = mail.store(num,'+FLAGS','\\Seen')
@app.route('/')
def show_entries():
    """Render the landing page with no data file preselected."""
    return render_template('main.html')
@app.route('/<filename>')
def show_data(filename):
    """Render the main page preloaded with `filename` when it exists in DATADIR.

    basename() strips any path components, preventing directory traversal.
    """
    safe_name = os.path.basename(filename)
    if os.path.isfile(os.path.join(datafullpath, safe_name)):
        return render_template('main.html', filename=safe_name)
    return render_template('main.html')
@app.route('/data/')
def get_data_list():
    """List the data files (name + mtime) as JSON, newest first."""
    datafiles = []
    for f in os.listdir(datafullpath):
        ffullpath = os.path.join(datafullpath, f)
        if os.path.isfile(ffullpath):
            filedatestr = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(os.path.getmtime(ffullpath)))
            elem = {"name": f, "date": filedatestr}
            datafiles.append(elem)
    # the timestamp format sorts lexicographically, so a string sort is by date
    datafilessorted = sorted(datafiles, key=lambda k: k['date'], reverse=True)
    email_check()
    return jsonify(datafilessorted)
@app.route('/data/<filename>')
def get_data(filename):
    """Serve the rows of a CSV file in DATADIR as a JSON array of arrays.

    Responds 404 when the file does not exist.
    """
    # Check if the file exists (basename() blocks path traversal)
    datafilename = os.path.join(datafullpath, os.path.basename(filename))
    if not os.path.exists(datafilename):
        print("Couldn't find file: " + datafilename)
        # BUGFIX: returning None from a Flask view raises a TypeError at
        # request time — answer with a proper 404 instead.
        abort(404)
    # Read the file, ensuring the handle is closed on every path
    out = []
    with open(datafilename, 'r') as f:
        # Detect the format of the csv file
        dialect = csv.Sniffer().sniff(f.read(1024))
        f.seek(0)
        csvreader = csv.reader(f, dialect)
        for row in csvreader:
            out.append(row)
    return jsonify(out)
|
import urllib2
from pyIEM import iemdb
import mx.DateTime
# Open database handles: 'coop' stores the climate data rows,
# 'mesosite' the station metadata built up in stmeta below.
i = iemdb.iemdb()
coop = i['coop']
mesosite = i['mesosite']
stmeta = {}
def parse_lonlat( txt ):
    """Parse 'lat_deg lat_min lat_sec lon_deg lon_min lon_sec' into (lon, lat).

    Minutes and seconds are folded into decimal degrees.  The longitude's
    minute/second fraction is subtracted, so negative (western) degree
    values move further west.
    """
    fields = txt.split()
    lat = float(fields[0]) + (float(fields[1]) + float(fields[2]) / 60.0) / 60.0
    lon = float(fields[3]) - (float(fields[4]) + float(fields[5]) / 60.0) / 60.0
    return lon, lat
# Pass 1: parse fixed-width COOP.TXT station records into INSERT statements,
# keyed by state + station id.
# NOTE(review): the SQL below is built by raw string interpolation — station
# names containing quotes will break it; parameterized queries would be safer.
for line in open('COOP.TXT'):
    lon, lat = parse_lonlat( line[149:168] )
    elev = float( line[168:176] )
    name = line[99:129].strip()
    st = line[59:61]
    id = line[:6]
    iemid = "%s%s" % (st, id[2:])
    sql = """INSERT into stations(id, name, state, country, elevation, network, geom)
    VALUES ('%s', '%s', '%s', 'US', %s, '%sCLIMATE', 'SRID=4326;POINT(%s %s)')""" % (
    iemid, name, st, elev, st, lon, lat)
    stmeta["%s%s" % (st, id) ] = sql
# Pass 2: fetch each station's climate-normals text file from NCDC and load
# the daily high/low/precip values into ncdc_climate71.
for id in stmeta.keys():
    # Go checkout NCDC for data
    fp = "http://cdo.ncdc.noaa.gov/climatenormals/clim84/%s/%s.txt" % (id[:2], id)
    req = urllib2.Request(fp)
    try:
        lines = urllib2.urlopen(req).readlines()
    except:
        print 'Missing %s %s' % (id, fp)
        continue
    # short responses are error pages, not data
    if len(lines) < 40:
        continue
    data = {}
    stationid = '%s%s' % (id[:2].lower(), id[4:])
    # each JAN header advances to the next variable block in this order
    vars = ['low', 'high', 'blah','blah', 'blah', 'blah', 'precip']
    pointer = -1
    try:
        for line in lines:
            # -99 is the missing-data sentinel; treat it as zero
            tokens = line.replace("-99", " 0").strip().split()
            if line[0] in ['-', ' ']:
                continue
            if tokens[0] == "JAN":
                pointer += 1
            # NOTE(review): '%B' normally expects full month names, not
            # 'JAN' — presumably mx.DateTime is lenient here; confirm.
            ts = mx.DateTime.strptime("%s-01-2001" % (tokens[0],), '%B-%d-%Y')
            days = ((ts + mx.DateTime.RelativeDateTime(months=1)) - ts).days
            for v in range(int(days)):
                ts0 = ts + mx.DateTime.RelativeDateTime(days=v)
                if not data.has_key(ts0):
                    data[ts0] = {}
                val = tokens[v+1]
                data[ts0][ vars[pointer] ] = float(val)
        for ts in data.keys():
            # precip values are stored in hundredths of an inch
            sql = "INSERT into ncdc_climate71 (station, valid, high, low, precip) VALUES ('%s', '2000-%s', %s, %s, %s)" % (stationid, ts.strftime("%m-%d"), data[ts]['high'], data[ts]['low'], data[ts]['precip'] / 100.0)
            coop.query(sql)
        print 'Worked %s %s' % (len(data.keys()), stationid,)
    except:
        print 'Fail %s' % (id,)
        continue
    # Iowa stations already exist in mesosite; add everything else
    if id[:2] != 'IA':
        try:
            mesosite.query( stmeta[id] )
        except:
            pass
|
#! /usr/bin/python
import sys
# Input scaffold FASTA path; the output keeps one copy of each unique sequence.
scaffoldFile = sys.argv[1]
outputFile = "%s_unique.fasta" %(scaffoldFile)
infileScaffolds = open(scaffoldFile, "r")
outfile = open(outputFile, "w")
# fastaDict maps a running record index -> list of sequence lines, which is
# collapsed to a single string per record once the record ends
fastaDict = {}
key = 0
fastaDict[key] = []
for line in infileScaffolds:
    if ">" in line:
        # a header starts a new record: first join the previous record's lines
        joinLine = "".join(fastaDict[key])
        fastaDict[key] = joinLine
        key += 1
        fastaDict[key] = []
    if ">" not in line:
        stripLine = line.strip("\n")
        fastaDict[key].append(stripLine)
# join the final record, which has no trailing header to trigger the join
joinLine = "".join(fastaDict[key])
fastaDict[key] = joinLine
key = 0
# NOTE(review): record 0 is the empty string produced before the first
# header, so one empty scaffold entry is written — confirm this is intended.
for item in sorted(set(fastaDict.values())):
    outfile.write(">Iengl_Schafran43_scaffold%d\n" %(key))
    outfile.write("%s" %(item))
    outfile.write("\n")
    key += 1
print "%d unique scaffolds out of %d total" %(len(set(fastaDict.values())), len(fastaDict))
outfile.close()
infileScaffolds.close()
|
# Generated by Django 3.0.3 on 2020-03-21 14:57
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds the 'desc' and 'project_title'
    # CharFields (max_length=100, default '') to the existing Project model.
    dependencies = [
        ('mywebsite', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='project',
            name='desc',
            field=models.CharField(default='', max_length=100),
        ),
        migrations.AddField(
            model_name='project',
            name='project_title',
            field=models.CharField(default='', max_length=100),
        ),
    ]
|
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 8 18:01:34 2015
@author: Erin
"""
# An implementation of example 2 from MT-DREAM(ZS) original Matlab code. (see Laloy and Vrugt 2012)
# 200 dimensional Gaussian distribution
import numpy as np
import os
from pydream.parameters import FlatParam
from pydream.core import run_dream
from pydream.convergence import Gelman_Rubin
def Latin_hypercube(minn, maxn, N):
    """Draw N Latin-hypercube samples between per-dimension bounds minn and maxn.

    Each dimension is divided into N equal strata; one uniform draw is taken
    per stratum and the strata are randomly permuted, so every dimension is
    covered evenly.  RNG call order matches the original (one rand matrix,
    then one permutation per dimension).
    """
    jitter = np.random.rand(N, len(minn))
    samples = np.zeros((N, len(minn)))
    for dim in range(len(minn)):
        order = np.random.permutation(N)
        strata = (order - jitter[:, dim]) / N
        samples[:, dim] = minn[dim] + strata * (maxn[dim] - minn[dim])
    return samples
# Problem setup: a d-dimensional correlated Gaussian target
# (example 2 from the MT-DREAM(ZS) Matlab code; Laloy and Vrugt 2012).
d = 200
# Pairwise correlation 0.5 with variances growing with the dimension index.
A = .5 * np.identity(d) + .5 * np.ones((d,d))
C = np.zeros((d,d))
for i in range(d):
    for j in range(d):
        C[i][j] = A[i][j] * np.sqrt((i+1)*(j+1))
invC = np.linalg.inv(C)
mu = np.zeros(d)
# For large d the normalizing constant under/overflows, so drop it — a
# constant shift of the log-density does not affect the sampler.
if d > 150:
    log_F = 0
else:
    log_F = np.log(((2 * np.pi)**(-d/2))*np.linalg.det(C)**(- 1./2))
#Create initial samples matrix m that will be loaded in as DREAM history file
m = Latin_hypercube(np.linspace(-5, -5, num=d), np.linspace(15, 15, num=d), 1000)
np.save('ndim_gaussian_seed.npy', m)
def likelihood(param_vec):
    """Log-density of the correlated d-dim Gaussian at param_vec (plus log_F)."""
    quad_form = np.sum(param_vec * np.dot(invC, param_vec))
    return log_F - .5 * quad_form
# Three chains start from the first three Latin-hypercube rows.
starts = [m[chain] for chain in range(3)]
# Flat (unbounded) parameter vector centred on mu.
params = FlatParam(test_value=mu)
if __name__ == '__main__':
    niterations = 150000
    # Run DREAM sampling. Documentation of DREAM options is in Dream.py.
    converged = False
    total_iterations = niterations
    nchains = 3
    sampled_params, log_ps = run_dream([params], likelihood, niterations=niterations, nchains=nchains, start=starts, start_random=False, save_history=True, adapt_gamma=False, gamma_levels=1, tempering=False, history_file='ndim_gaussian_seed.npy', multitry=5, parallel=False, model_name='ndim_gaussian')
    # Save sampled parameters and log-probabilities for each chain.
    for chain in range(len(sampled_params)):
        np.save('ndimgauss_mtdreamzs_3chain_sampled_params_chain_'+str(chain)+'_'+str(total_iterations), sampled_params[chain])
        np.save('ndimgauss_mtdreamzs_3chain_logps_chain_'+str(chain)+'_'+str(total_iterations), log_ps[chain])
    os.remove('ndim_gaussian_seed.npy')
    # Check convergence and continue sampling if not converged
    GR = Gelman_Rubin(sampled_params)
    print('At iteration: ', total_iterations, ' GR = ', GR)
    np.savetxt('ndimgauss_mtdreamzs_3chain_GelmanRubin_iteration_' + str(total_iterations) + '.txt', GR)
    old_samples = sampled_params
    if np.any(GR > 1.2):
        # restart each chain from its last sample and keep going until GR < 1.2
        starts = [sampled_params[chain][-1, :] for chain in range(nchains)]
        while not converged:
            total_iterations += niterations
            sampled_params, log_ps = run_dream([params], likelihood, niterations=niterations, nchains=nchains,
                                               start=starts, start_random=False, save_history=True, adapt_gamma=False,
                                               gamma_levels=1, tempering=False, multitry=5, parallel=False,
                                               model_name='ndim_gaussian', restart=True)
            for chain in range(len(sampled_params)):
                np.save('ndimgauss_mtdreamzs_3chain_sampled_params_chain_' + str(chain) + '_' + str(total_iterations),
                        sampled_params[chain])
                np.save('ndimgauss_mtdreamzs_3chain_logps_chain_' + str(chain) + '_' + str(total_iterations),
                        log_ps[chain])
            old_samples = [np.concatenate((old_samples[chain], sampled_params[chain])) for chain in range(nchains)]
            GR = Gelman_Rubin(old_samples)
            print('At iteration: ', total_iterations, ' GR = ', GR)
            np.savetxt('ndimgauss_mtdreamzs_5chain_GelmanRubin_iteration_' + str(total_iterations)+'.txt', GR)
            if np.all(GR < 1.2):
                converged = True
    try:
        # Plot output
        import seaborn as sns
        from matplotlib import pyplot as plt
        total_iterations = len(old_samples[0])
        # BUGFIX: use integer division — under Python 3 `/` yields a float,
        # and a float burn-in index raises when slicing the arrays below.
        burnin = total_iterations // 2
        samples = np.concatenate((old_samples[0][burnin:, :], old_samples[1][burnin:, :], old_samples[2][burnin:, :]))
        ndims = len(old_samples[0][0])
        colors = sns.color_palette(n_colors=ndims)
        for dim in range(ndims):
            fig = plt.figure()
            sns.distplot(samples[:, dim], color=colors[dim])
            fig.savefig('PyDREAM_example_NDimGauss_dimension_' + str(dim))
    except ImportError:
        pass
else:
    # kwargs bundle for running this model via an external driver
    run_kwargs = {'parameters':[params], 'likelihood':likelihood, 'niterations':150000, 'nchains':3, 'start':starts, 'start_random':False, 'save_history':True, 'adapt_gamma':False, 'gamma_levels':1, 'tempering':False, 'history_file':'ndim_gaussian_seed.npy', 'multitry':5, 'parallel':False, 'model_name':'ndim_gaussian'}
|
def count(valuelist):
    """Return the absolute differences between each pair of adjacent values."""
    return [abs(prev - cur) for prev, cur in zip(valuelist, valuelist[1:])]
# HackerRank "Funny String": a string is funny when the absolute differences
# of adjacent character codes read the same forwards and backwards.
test_case = int(input())
while(test_case):
    input_list = list(input())
    # reversed() returns a lazy iterator bound to the ORIGINAL list object,
    # so rebinding input_list below does not affect it
    reversed_input_list = reversed(input_list)
    # map characters to their code points before differencing
    input_list = [ord(x) for x in input_list]
    reversed_input_list = [ord(x) for x in reversed_input_list]
    input_list = count(input_list)
    reversed_input_list = count(reversed_input_list)
    if(input_list == reversed_input_list):
        print("Funny")
    else:
        print("Not Funny")
    test_case -= 1
import os
import random
import string
import setup_catalog
from google.api_core.client_options import ClientOptions
from google.cloud.retail_v2 import SearchServiceClient, SearchRequest
project_number = "1038874412926"
endpoint = "retail.googleapis.com"
# Product attribute key used to keep this test run's products isolated.
isolation_filter_key = "INTEGRATION_FILTER_KEY"
title_query = "Nest_Maxi"
visitor_id = "visitor"
# A random single lowercase letter namespacing this run's products.
test_id = ''.join(random.sample(string.ascii_lowercase, 1))
# [START search_client]
default_catalog = "projects/{0}/locations/global/catalogs/default_catalog/branches/0".format(project_number)
default_search_placement = "projects/1038874412926/locations/global/catalogs/default_catalog/placements/default_search"
def get_search_service_client():
    """Build a SearchServiceClient pointed at the retail API endpoint.

    ClientOptions' first positional argument is api_endpoint.
    """
    client_options = ClientOptions(endpoint)
    return SearchServiceClient(client_options=client_options)
# [END search_client]
def build_isolation_filter(test__id: str):
    """Build the search filter that restricts results to this run's products."""
    return f'attributes.{isolation_filter_key}: ANY("{test__id}")'
# [START search_product_with_boost_spec]
def search_products_with_boost_spec(query: str, _condition: str, _boost_strength: float):
    """Search the default catalog, boosting products that match _condition.

    BUGFIX: the original built the boost spec on throwaway objects —
    `boost_spec.ConditionBoostSpec().condition = ...` assigned to a
    temporary instance and `boost_spec.ConditionBoostSpec.boost = ...`
    assigned to the *class* — so the request carried an empty boost spec.
    Construct the nested proto messages explicitly instead.
    """
    condition_boost_spec = SearchRequest.BoostSpec.ConditionBoostSpec(
        condition=_condition,
        boost=_boost_strength,
    )
    boost_spec = SearchRequest.BoostSpec(
        condition_boost_specs=[condition_boost_spec],
    )
    search_request = SearchRequest()
    search_request.placement = default_search_placement
    search_request.branch = default_catalog
    search_request.query = query
    search_request.filter = build_isolation_filter(test_id)
    search_request.visitor_id = visitor_id
    search_request.boost_spec = boost_spec
    print("---search request---")
    print(search_request)
    return get_search_service_client().search(search_request)
# [END search_product_with_boost_spec]
def search():
    """Ingest the test products, run the boosted search, then clean up."""
    setup_catalog.ingest_products(test_id)
    search_response = search_products_with_boost_spec(title_query, "(colorFamily: ANY(\"blue\"))", 0.5)
    print("BOOST SEARCH RESULTS")
    print(search_response.results)
    setup_catalog.delete_products()
# Runs on import as well as direct execution.
search()
|
from itertools import product
import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C
# Training data: measurements on a 2 x 9 grid of (x, y) positions.
X = np.array([[0,0],[2,0],[4,0],[6,0],[8,0],[10,0],[12,0],[14,0],[16,0],[0,2],
[2,2],[4,2],[6,2],[8,2],[10,2],[12,2],[14,2],[16,2]])
y = np.array([-54,-60,-62,-64,-66,-68,-70,-72,-74,-60,-62,-64,-66,
-68,-70,-72,-74,-76])
# Input space (np.linspace defaults to 50 points per axis — the reshape(50,50)
# calls below depend on that default)
x1 = np.linspace(X[:,0].min(), X[:,0].max()) #p
x2 = np.linspace(X[:,1].min(), X[:,1].max()) #q
x = (np.array([x1, x2])).T
print(x)
# Constant * anisotropic RBF kernel with per-dimension length scale 5
kernel = C(1.0, (1e-3, 1e3)) * RBF([5,5], (1e-2, 1e2))
gp = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=15)
gp.fit(X, y)
# Predict over the full cartesian grid of (x1, x2)
x1x2 = np.array(list(product(x1, x2)))
y_pred, MSE = gp.predict(x1x2, return_std=True)
X0p, X1p = x1x2[:,0].reshape(50,50), x1x2[:,1].reshape(50,50)
Zp = np.reshape(y_pred,(50,50))
# alternative way to generate equivalent X0p, X1p, Zp
# X0p, X1p = np.meshgrid(x1, x2)
# Zp = [gp.predict([(X0p[i, j], X1p[i, j]) for i in range(X0p.shape[0])]) for j in range(X0p.shape[1])]
# Zp = np.array(Zp).T
fig = plt.figure(figsize=(10,8))
#ax = fig.add_subplot(111)
#ax.pcolormesh(X0p, X1p, Zp)
ax = fig.add_subplot(111, projection='3d')
surf = ax.plot_surface(X0p, X1p, Zp, rstride=1, cstride=1, cmap='jet', linewidth=0, antialiased=False)
plt.show()
import aiohttp_cors
from app.api import profile, roadmaps, skills, spec, vacancies
def setup_routes(app):
    """Register the application's HTTP routes and wrap each with open CORS."""
    cors = aiohttp_cors.setup(app, defaults={
        '*': aiohttp_cors.ResourceOptions(
            allow_credentials=True,
            expose_headers='*',
            allow_headers='*',
        ),
    })
    cors.add(app.router.add_get('/spec', spec.handler))
    cors.add(app.router.add_get('/vacancies', vacancies.handler))
    cors.add(app.router.add_post('/skills', skills.handler))
    cors.add(app.router.add_get('/profile/known', profile.known_handler))
    cors.add(app.router.add_get('/profile/unknown', profile.unknown_handler))
    cors.add(app.router.add_get('/profile/score', profile.score_handler))
    cors.add(app.router.add_post('/profile/complete', profile.complete_handler))
    cors.add(app.router.add_get('/profile/courses', profile.courses_handler))
    cors.add(app.router.add_get('/roadmaps', roadmaps.handler))
|
import hashlib
import random
from string import ascii_uppercase, digits
from rest_framework.response import Response
from rest_framework import status
from rest_framework.viewsets import ModelViewSet, ReadOnlyModelViewSet
from .serializers import TransactionModelSerializer
from .models import Transaction
from rest_framework.decorators import list_route
from rest_framework.permissions import IsAdminUser, IsAuthenticated, AllowAny
from .money_wave_utils import *
import json
from config.settings.keys import *
class TransactionModelViewSet(ModelViewSet):
    """Transaction endpoints plus Rave/Moneywave payment helper actions."""
    model = Transaction
    permission_classes = [AllowAny]
    serializer_class = TransactionModelSerializer
    def get_queryset(self):
        # only the requesting user's transactions, newest first
        return Transaction.objects.filter(user=self.request.user).order_by('-modified_on')
    @list_route(methods=['post'])
    def get_banks(self, request):
        """
        get all banks for selected country
        :param request:
        :return: <Response Object>
        """
        country = self.request.data['country']
        # bare name resolves to the module-level helper pulled in by the
        # money_wave_utils star import, not this method
        banks = get_banks(country)
        return Response({"banks": banks}, status=status.HTTP_200_OK)
    @list_route(methods=['post'])
    def resolve_account(self, request):
        """
        resolve an account
        :param request:
        :return:
        """
        bank_code = request.data['bank_code']
        account_number = request.data['account_number']
        # NOTE(review): the 'country' field is stored as `currency` — confirm
        # the helper really expects a country code in this position
        currency = request.data['country']
        resolve = resolve_account(account_number, bank_code, currency)
        if not resolve:
            return Response(status=status.HTTP_404_NOT_FOUND)
        else:
            print(resolve)
            return Response({"account": resolve}, status=status.HTTP_200_OK)
    @list_route(methods=['post'])
    def ravepayment_request(self, request):
        """Build a Rave payment payload plus its integrity hash for the client."""
        hashedPayload = ''
        payload = {
            "PBFPubKey": FLW_API_KEY,
            "amount": request.data['amount'],
            "payment_method": "both",
            "custom_description": "Kaimun",
            "custom_title": "Instant Money Transfers",
            "country": request.data['country'],
            "currency": request.data['currency'],
            "customer_email": request.user.email,
            "customer_firstname": request.user.first_name,
            "customer_lastname": request.user.last_name,
            # "customer_phone": request.data['phone'],
            "txref": "KMN-" + ''.join(random.sample((ascii_uppercase+digits), 5))
        }
        # sort payload and concatenate into a single string
        sorted_payload = sorted(payload)
        # concatenate sorted_payload. The payload is rearranged and the values concatenated in the order of the sorted keys.
        hashed_payload = ''
        # NOTE(review): this loop concatenates the sorted KEYS, but Rave's
        # integrity checksum expects the VALUES in key-sorted order — verify
        # against the Rave checksum documentation.
        for value in sorted_payload:
            hashed_payload += value
        # HACK: secret key is hardcoded in source — move to settings/keys.
        hashed_string = hashed_payload + "FLWSECK-b86e4802fc5eaa03db5e7f73fdc4514e-X"
        integrity_hash = hashlib.sha256(hashed_string.lower().encode()).hexdigest()
        return Response({'payload': payload, 'integrityHash': integrity_hash})
    @list_route(methods=['post'])
    def ravepay_deposit(self, request):
        """Verify a Rave charge via xrequery, then disburse to the given account."""
        # instance = self.get_object()
        url = "https://ravesandboxapi.flutterwave.com/flwv3-pug/getpaidx/api/xrequery"
        data = {
            "txref": request.data['txRef'],
            "SECKEY" : FLW_API_SECRET,
            "include_payment_entity": 1
        }
        # `requests` is expected to arrive via the star imports above — confirm
        response = requests.post(url, data=data).json()
        account_number = request.data['account_number']
        bank_code = request.data['bank_code']
        currency = request.data['currency']
        amount = float(request.data['amount'])
        narration = request.data['narration']
        sender = request.user.first_name + ' ' + request.user.last_name
        # confirm that the response for the transaction is successful
        if response['status'] == 'success':
            data = response['data']
            if data[0]['chargecode'] == '00':
                chargedamount = float(data[0]['chargedamount'])
                # NOTE(review): strictly greater-than — an exact-amount charge
                # is rejected; confirm whether >= was intended
                if chargedamount > amount:
                    make_transfer = disburse(account_number, bank_code, amount, narration, currency, sender)
                    if make_transfer:
                        return Response({'message': 'Successfully Sent Funds'}, status=status.HTTP_200_OK)
                    else:
                        return Response({'message': 'Unable to send funds'}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
                else:
                    return Response({'message': 'Unable to send funds'}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
            else:
                return Response({'message': 'Transaction was not successful'}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
        ##changing this to ok for simulation purposes, the moneywave api isn't bring the correct response
        else:
            return Response({'message': '4Unable to send funds'}, status=status.HTTP_200_OK)
|
import gtk
from System import SystemType
# Object describing the host platform; compared against "linux" below.
current_system = SystemType()
class StatusIcon:
    """GTK system-tray icon exposing login/logout/about/quit for WWU wifi."""
    def __init__(self, parent):
        # parent is the controller exposing wwu_auth/wwu_de_auth and a
        # `logged` flag this icon keeps in sync
        self.parent = parent
        iconpath = "WWU.gif"
        self.statusicon = gtk.StatusIcon()
        self.statusicon.set_from_file(iconpath)
        self.statusicon.connect("button_press_event", self.click_event)
        # self.statusicon.connect("activate", self.click_event)
        self.statusicon.set_tooltip("WWU wifi")
        self.window = gtk.Window()
        self.window.show_all()
        self.window.hide()
        self.parent.logged = False
    def click_event(self, widget, event):
        """Build and pop up the context menu on a left click."""
        if event.button == 1:
            menu = gtk.Menu()
            # exactly one of login/logout exists per popup, chosen by state;
            # the later conditionals use the same flag, so the name is bound
            if self.parent.logged:
                logout = gtk.MenuItem("Logout")
            else:
                login = gtk.MenuItem("Login")
            about = gtk.MenuItem("About")
            quit = gtk.MenuItem("Quit")
            if self.parent.logged:
                logout.connect("activate", self.logout)
            else:
                login.connect("activate", self.login)
            about.connect("activate", self.show_about_dialog)
            # NOTE(review): current_system is a SystemType instance compared
            # against the string "linux" — confirm SystemType defines __eq__,
            # otherwise this branch never matches.
            if current_system == "linux":
                quit.connect("activate", self.parent.quit)
            else:
                quit.connect("activate", gtk.main_quit)
            if self.parent.logged:
                menu.append(logout)
            else:
                menu.append(login)
            menu.append(about)
            menu.append(quit)
            menu.show_all()
            menu.popup(None, None, gtk.status_icon_position_menu, event.button, event.time, self.statusicon)
    def show_about_dialog(self, widget):
        """Display a modal About dialog."""
        about_dialog = gtk.AboutDialog()
        about_dialog.set_destroy_with_parent(True)
        about_dialog.set_name("about wwu-auth")
        about_dialog.set_version("0.1")
        about_dialog.set_authors(["Morgan Borman"])
        about_dialog.run()
        about_dialog.destroy()
    def set_visibility(self, visibility):
        # show or hide the tray icon
        self.statusicon.set_visible(visibility)
    def set_blinking(self, blinking):
        # toggle the icon's attention blink
        self.statusicon.set_blinking(blinking)
    def logout(self, opt):
        """Menu handler: de-authenticate and clear the logged flag."""
        self.parent.wwu_de_auth()
        self.parent.logged = False
    def login(self, opt):
        """Menu handler: authenticate and set the logged flag."""
        self.parent.wwu_auth()
        self.parent.logged = True
|
import requests
import time,random
from bs4 import BeautifulSoup
from urllib import request
def getData(data):
    """Format the first forecast table row into a readable weather summary.

    data: the list of <tr> elements from the CWB forecast table; only the
    first row (the nearest time window) is used — note the break below.
    """
    string=""
    # NOTE: the local `time` list shadows the imported time module here
    time,temp,pict,condi,confort,rain,msg=[],[],[],[],[],[],[]
    for data_ in data:  # extract time, temperature, conditions, comfort, rain chance
        time.append(data_.find('th',{'scope':'row'}).text)
        temp.append(data_.find_all('td')[0].text)
        condi.append(data_.find('img')['title'])
        confort.append(data_.find_all('td')[2].text)
        rain.append(data_.find_all('td')[3].text)
        # pick a friendly message keyed off the weather description
        if "雨" in str(condi[0]): msg.append("記得帶雨傘唷!!")
        elif "晴" in str(condi[0]):msg.append("要記得塗防曬喔~~~~")
        elif "多雲" in str(condi[0]):msg.append("今天是個適合運動的日子")
        else :msg.append("每一天都是新的一天!")
        break  # only the first row is needed
    #for i in range(len(time)):
    string+="時間:%s \n溫度:%s (℃) \n天氣狀況:%s \n舒適度:%s \n降雨機率:%s \n我想對你說:%s"%(time[0],temp[0],condi[0],confort[0],rain[0],msg[0])
    return string
def Country(text):
    """Fetch and summarize the CWB forecast page for a Taiwanese city/county.

    ``text`` is a location name (several aliases accepted per location).
    Returns the formatted summary produced by :func:`getData`.
    Raises ValueError when the location is not recognized (the original
    left ``url`` unbound and crashed with a confusing NameError).
    """
    pages = {
        "Taipei_City.htm": ["台北市", "臺北市", "台北", "臺北"],
        "New_Taipei_City.htm": ["新北市", "新北"],
        "Taoyuan_City.htm": ["桃園市", "桃園"],
        "Taichung_City.htm": ["臺中市", "台中市", "台中", "臺中"],
        "Tainan_City.htm": ["臺南市", "台南市", "台南", "臺南"],
        "Kaohsiung_City.htm": ["高雄市", "高雄"],
        "Keelung_City.htm": ["基隆市", "基隆"],
        "Hsinchu_City.htm": ["新竹市"],
        "Hsinchu_County.htm": ["新竹縣"],
        "Miaoli_County.htm": ["苗栗縣", "苗栗"],
        "Changhua_County.htm": ["彰化縣", "彰化"],
        "Nantou_County.htm": ["南投縣", "南投"],
        "Yunlin_County.htm": ["雲林縣", "雲林"],
        "Chiayi_City.htm": ["嘉義市"],
        "Chiayi_County.htm": ["嘉義縣"],
        "Pingtung_County.htm": ["屏東縣", "屏東"],
        "Yilan_County.htm": ["宜蘭縣", "宜蘭"],
        "Hualien_County.htm": ["花蓮縣", "花蓮"],
        "Taitung_County.htm": ["臺東縣", "台東縣", "台東", "臺東"],
        "Penghu_County.htm": ["澎湖縣", "澎湖"],
        "Kinmen_County.htm": ["金門縣", "金門"],
        "Lienchiang_County.htm": ["連江縣", "連江"],
    }
    url = None
    for page, aliases in pages.items():  # resolve the location to its CWB page
        if text in aliases:
            url = "https://www.cwb.gov.tw/V7/forecast/taiwan/" + page
            break
    if url is None:
        raise ValueError("unknown location: %s" % text)
    response = requests.get(url)
    html_doc = str(response.content, 'utf-8')
    soup = BeautifulSoup(html_doc, "html.parser")
    # Forecast rows live in the first FcstBoxTable01 table's tbody.
    data = soup.find('div', {'class': 'BoxContent clearfix'}).find(
        'table', {'class': 'FcstBoxTable01'}).find('tbody').find_all('tr')
    return getData(data)
def dataurl():
    """Return an open URL stream for the current CWB UV-index map image.

    Scrapes the background-image URL out of the UVI page's inline
    ``style`` attribute and opens it with urllib.
    """
    url="https://www.cwb.gov.tw/V7/forecast/UVI/UVI.htm"
    http="https://www.cwb.gov.tw"
    response=requests.get(url)
    soup = BeautifulSoup(response.text,"html.parser")
    # style="...url(/path/img.png)" -> take everything after "url(" minus ")"
    data=soup.find('div',{'class':'UVITWmap'})["style"].split("url(")[1][:-1]
    return request.urlopen(http+data)
def main():
    """Smoke-test the scraper with a fixed city."""
    Country('台北市')
# Run the demo lookup when executed directly as a script.
if __name__ == '__main__':
    main()
# !/usr/bin/env python
# -*- coding: utf-8 -*-
# Time : 2017-08-25 14:33
# Author : MrFiona
# File : summary_optparse.py
# Software: PyCharm Community Edition
"""
模块optparse使用类OptionParser来作为命令行选项的解析器;下面是该类的方法:
1、OptionParser(self, prog=None, usage=None, description=None, epilog=None, option_list=None,
option_class=<class optparse.Option>, version=None, conflict_handler='error', formatter=None, add_help_option=True)
构造函数__init__(),用于创建一个命令行选项解析器实例;其中,参数:
description:
usage : 描述当前脚本程序的用法字符串;显示该用法之前,格式"%prog"将被格式化成当前脚本程序的名称
prog : 默认为当前脚本程序的名称 os.path.basename(sys.argv[0])
description : 当前脚本程序的简单描述、摘要、大纲;它会被显示在命令行选项的帮助之前
epilog : 当前脚本程序的简单描述、摘要、大纲;它会被它会被显示在命令行选项的帮助之后
conflict_handler : 命令行选项冲突处理器;比如,当命令行选项重复时,该如何让处理;可选值:error、resolve
add_help_option : 是否自动生成帮助信息;True:是; False:否; 默认值是True
option_list : 当前脚本程序的命令行选项列表;这个选项列表在standard_options_list中选项添加之后,
但是在版本和帮助选项添加之前;可以手工创建该列表,该列表中的元素都使用函数make_option()生成
例如 : option_list=[make_option("-f","--file",action="store",type="string",dest="filename"), ...]
option_class : 在使用函数add_option()添加命令行选项到解析器时使用的类;默认为optparse.Option类
version : 打印版本的信息
formatter : 帮助信息格式;有两种格式:IndentedHelpFormatter和TitledHelpFormatter;
其中,参数prog在usage和version中使用格式字符串"%prog"代替os.path.basename(sys.argv[0])
2、OptionParser.add_option(self, *args, **kwargs)
该函数用于添加命令行选项;参数*args用于传递一个命令行选项的列表;**kwargs用于传递该命令行选项的属性;有几种用法:
1、parser.add_option(self, optparse.Option):直接添加一个命令行选项类的实例
2、parser.add_option(self, option_list):直接添加一个命令行选项列表;option_list=[make_option(), ...]
3、parser.add_option(*opt_str, ..., kwarg=val, ...)
常用的是第三种;这种用法的函数原型如下:
optparse.OptionParser.add_option(short_option[, long_option], action="store", type="store", dest=None, nargs=1,
default=None, help="help text", metavar="");其中,参数如下:
description:
short_option : 短选项字符串;例如,"-f"、"-X"
long_option : 长选项字符串;例如,"--file"、"--start";长选项和短选项共同构成可选参数*args或*opt_str
action : 行为字符串;它指示optparse当解析到一个命令行选项时该如何处理;可选值store、store_true、store_false、store_const、
append、append_const、count、callback、help、version;默认值是store,表示把命令行参数保存到options对象中
type : 当action的值为存储方式时,type表示存储的数据的类型;有string、int、long、float、complex、choice
dest : 当action的值为存储方式时,dest表示用于存储数据(当前命令行选项附加的参数值)的变量,它会成为函数parse_args()返回的options对象的属性,
通过"对象名.属性名"的方式引用;如果不指定dest参数的值,则使用当前命令行选项的长选项名作为dest参数的缺省值和options对象的属性,
来存储当前命令行选项的附加参数值;如果当前命令行选项没有指定长选项,则使用短选项名作为dest参数的缺省值和options对象的属性,
来存储当前命令行选项的附加参数值
nargs : 指定当前命令行选项应该需要接受的附加参数值的个数;默认值为1;多个参数值之间用空格隔开;当在命令行为该选项输入的附加参数值的个数多于
nargs指定的个数时,则值取用前面的nargs个;当在命令行上为该选项输入的附加参数值的个数少于nargs所指定的个数时,则会报错;
如果nargs>1,则python解释器会把这nargs个参数值组装成一个元组(tuple),然后把这个元组传递给当前程序使用
default : 当action的值为存储方式时,default用于指定dest表示的属性变量的缺省值,即,当前命令行选项附加的参数的缺省值
help : 当前命令行选项的帮助、说明信息
metavar:占位字符串;用于在输出帮助信息时,代替当前命令行选项的附加参数的值进行输出;例如:"-f FILE --file FILE";这个例子中,字符串"FILE"就是metavar的值
例如 : add_option("-f", "--file", action="store", type="string", dest="fileName", default="file.txt", help="save host info", metavar="FILE");
当调用parse_args()函数之后,会返回一个options对象,dest参数的值"fileName"将作为options对象的属性名使用,即:options.fileName;同时把当前命令行选项的
附加参数值保存在该属性中,即:options.fileName="file.txt"
3、(options,args) = optparse.OptionParser.parse_args(self, args=None, values=None)
该函数用于解析命令行参数;其中,参数:
description:
args : 用于传递需要被解析的命令行选项列表;默认是sys.argv[1:]
values : 用于传递命令行选项的附加参数值的对象;是optparse.Values类的对象;
返回值:是一个包含(options,args)对的tuple
args : 所有被处理的参数之后的剩余参数
4、optparse.OptionParser.has_option(self, opt_str):
该函数用于判断OptionParser实例是否有一个名为opt_str的命令行选项;返回值:True-有; False-无;
5、optparse.OptionParser.get_option(self, opt_str):
该函数用于获取命令行选项opt_str的实例;若没有该选项,则返回None;
6、optparse.OptionParser.remove_option(self, opt_str):
该函数用于移除命令行选项opt_str;若OptionParser对象中存在命令行选项opt_str,则移除,否则抛出ValueError异常;
若存在多个opt_str的选项,则所有相关的选项都变成无效;
7、optparse.OptionParser.destroy() : 该函数用于销毁OptionParser对象实例;
"""
import sys
import optparse
# Module-level demo parser exercising callback options (--start / --stop).
parser = optparse.OptionParser(usage="usage: %prog [options] arg1 arg2 .....", version="1.0",
                               description="This is optparse example code")
def doStop(option, opt_str, value, parser):  # minimal callback signature, no extra args
    """optparse callback for --stop: dump the callback arguments and parser state."""
    for label, payload in (("option:", option), ("opt_str", opt_str), ("value:", value)):
        print(label, payload)
    print("stopping the web server ......")
    print("largs:", parser.largs)
    print("rargs:", parser.rargs)
def doStart(option, opt_str, value, parser, *args, **kwargs):  # maximal callback signature
    """optparse callback for --start: dump every argument it receives."""
    for label, payload in (("option:", option), ("opt_str", opt_str), ("value:", value),
                           ("*args:", args), ("*kwargs:", kwargs)):
        print(label, payload)
    print("starting the web server ......")
    print("largs:", parser.largs)
    print("rargs:", parser.rargs)
# --start consumes 3 values and invokes doStart with extra positional and
# keyword callback arguments; --stop invokes doStop with no extras.
parser.add_option("--start", action="callback", callback=doStart, callback_args=("192.168.0.253", 3307),
                  callback_kwargs={"user": "user", "pswd": "pwd"}, nargs=3, default=None, metavar="START")
parser.add_option("--stop", action="callback", callback=doStop, default=None, metavar="STOP")
def doStart(option, opt_str, value, parser, *args, **kwargs):
    """Callback for --start: print the full callback argument set."""
    # NOTE(review): this re-definition shadows the identical doStart defined
    # earlier in this module; consider removing one copy.
    print("option:", option)
    print("opt_str", opt_str)
    print("value:", value)
    print("*args:", args)
    print("*kwargs:", kwargs)
    print("starting the web server ......")
    print("largs:", parser.largs)
    print("rargs:", parser.rargs)
def doStop(option, opt_str, value, parser):
    """Callback for --stop: print the callback arguments and parser state."""
    # NOTE(review): this re-definition shadows the identical doStop defined
    # earlier in this module; consider removing one copy.
    print("option:", option)
    print("opt_str", opt_str)
    print("value:", value)
    print("stopping the web server ......")
    print("largs:", parser.largs)
    print("rargs:", parser.rargs)
def Main(argc, argv):
    """Demo driver: build a feature-complete OptionParser and dump the results.

    ``argc``/``argv`` are the argument count and list (i.e. ``sys.argv[1:]``).
    Exits with status 2 (via ``parser.error``) when no arguments are given.
    """
    strUsage = "Usage: %prog [option] args"
    parser = optparse.OptionParser(usage=strUsage, description="this program is used for study")
    parser.add_option("-f", "--file", action="store", type="string", dest="fileName", help="configation file",
                      metavar="FILE")
    parser.add_option("-v", "--verbose", action="store_true", dest="verbose", default=True)
    parser.add_option("-q", "--quit", action="store_false", dest="verbose", default=False)
    parser.add_option("-H", "--host", action="store", type="string", dest="strHost", nargs=3, default="127.0.0.1",
                      help="Remote Host IP(IP1 IP2 IP3)", metavar="IP")
    parser.add_option("-p", "--port", action="store", type="int", dest="iPort", nargs=3, default="3306",
                      help="Remote Host Port(Port1 Port2 Port3)", metavar="PORT")
    parser.add_option("-u", "--user", action="store", type="string", dest="strUserName", default="admin",
                      help="Your user name", metavar="UserName")
    parser.add_option("-P", "--password", action="store", type="string", dest="strPassword", default="admin",
                      help="Your password", metavar="Password")
    parser.add_option("-i", "--input", action="store", type="string", dest="strInput", default="input.txt",
                      help="as a file for input", metavar="FILE")
    parser.add_option("--start", action="callback", callback=doStart, callback_args=("192.168.0.253", 3307),
                      callback_kwargs={"user": "user", "pswd": "pwd"}, nargs=3, default=None, metavar="START")
    parser.add_option("--stop", action="callback", callback=doStop, default=None, metavar="STOP")
    parser.add_option("-a", action="store_const", dest="const_value", default='default_const_value',
                      const='store_const default value', help='Set a constant const value')
    parser.add_option("-c", action="store_true", dest="boolean_switch", default=True, help='Set a switch to True')
    parser.add_option("-d", action="store_false", dest="boolean_switch", default=False, help='Set a switch to False')
    parser.add_option("-e", action="append", dest="collection", default=[], help='Add repeated values to a list')
    parser.add_option("-W", action="append_const", dest="const_collection", const='value-1-to-append', default=[],
                      help='Add different values to list')
    parser.add_option("-D", action="append_const", dest="const_collection", const='value-2-to-append', default=[],
                      help='Add different values to list')
    if argc < 1:
        # parser.error() prints usage plus the message and exits(2); the
        # original's subsequent print_help()/sys.exit(1) were unreachable
        # and have been removed.
        parser.error("invalid argument for commond line;")
    # Fix: parse the argv we were given — parse_args() with no arguments
    # silently re-reads sys.argv[1:] and ignored the parameter.
    (options, largs) = parser.parse_args(argv)
    if options.fileName:
        print("read this file ......")
    if options.strHost:
        print("connect to remote host ......")
    print("---------options-----------")
    print(options)
    print("---------largs-----------")
    print(largs)
    print("---------fileName-----------,", options.fileName)
    print("---------strHost-----------,", options.strHost)
    print("---------iPort-----------,", options.iPort, type(options.iPort))
    print("---------largs-----------,", parser.largs)
    print("---------rargs-----------,", parser.rargs)
    print("---------values-----------,", parser.values)
    # Fix: the following five lines were Python 2 print statements — a
    # SyntaxError under Python 3, while the rest of the file uses print().
    print('store_const:\t', options.const_value)
    print('boolean_switch:\t', options.boolean_switch)
    print('collection:\t', options.collection)
    print('const_collection:\t', options.const_collection)
    print('const_collection:\t', options.const_collection)  # duplicated in original; kept for identical output
# Script entry point: forward the CLI arguments (minus the program name) to Main().
if __name__ == "__main__":
    argv = sys.argv[1:]
    argc = len(argv)
    Main(argc, argv)
from __future__ import absolute_import
import re
import json
import requests
from apis.base import BaseAPI, APIException
# Flickr API: https://www.flickr.com/services/api/
class FlickrPhotoAPI(BaseAPI):
    """Iterator over photo URLs from the public Flickr REST API.

    Scrapes a site API key from flickr.com, then pages through either a
    user's photos (``flickr.photos.search``) or the global recent feed
    (``flickr.photos.getRecent``), yielding medium-size static image URLs.
    """
    # Template for a photo's medium-size ("_m") static image URL.
    url_format = "https://farm{farm}.staticflickr.com/{server}/{id}_{secret}_m.jpg"
    per_page = 10  # photos fetched per API request
    def __init__(self, user, max_imgs):
        # user: Flickr user id (falsy means "recent photos"); max_imgs caps iteration.
        super(FlickrPhotoAPI, self).__init__(user, max_imgs)
        self.window_cur = 0  # cursor within the currently loaded result page
        self.get_api_key()
        self.load()
    def get_api_key(self):
        """Scrape the public site API key out of flickr.com's page source."""
        r = requests.get("https://flickr.com/photos/")
        if r.status_code == 200:
            # The key is embedded in inline JS:
            #   root.YUI_config.flickr.api.site_key = "...";
            m = re.search(b'root.YUI_config.flickr.api.site_key = "(.+?)";', r.content)
            if m:
                self.api_key = m.group(1)  # NOTE(review): bytes, not str — requests accepts it as a param value
                return
        raise APIException("Can't get API key from flickr")
    def load(self, page=1):
        """Fetch one page of API results into ``self.items``.

        Raises StopIteration on HTTP failure (consumed by the iteration
        protocol) and APIException when the API reports an error status.
        """
        r = requests.get("https://api.flickr.com/services/rest", params={
            "method": "flickr.photos.search" if self.user else "flickr.photos.getRecent",
            "format": "json",
            "user_id": self.user,
            "api_key": self.api_key,
            "per_page": self.per_page,
            "page": page
        })
        self.page = page
        if r.status_code != 200:
            self.max_imgs = 0
            raise StopIteration()
        else:
            # The response is JSONP: strip the jsonFlickrApi( ... ) wrapper.
            content = r.content.replace(b"jsonFlickrApi(", b"").rstrip(b")")
            self.json = json.loads(content)
            if self.json['stat'] == 'ok':
                self.total = int(self.json['photos']['total'])
                self.items = self.json['photos']['photo']
                self.window_cur = 0
            else:
                raise APIException(self.json['message'])
    def __next__(self):
        """Return the next photo URL, loading the following page on demand."""
        # self.cur is presumably maintained by BaseAPI — TODO confirm.
        if self.cur >= self.max_imgs or self.cur >= self.total:
            raise StopIteration()
        if self.window_cur >= len(self.items):
            self.load(self.page+1)
        item = self.items[self.window_cur]
        self.window_cur += 1
        self.cur += 1
        return self.url_format.format(**item)
from aiohttp import web
from docker import Client
from .launcher import Launcher
async def create_container_handler(request):
    """Handle POST /new: launch a preconfigured container, return its hostname.

    The response is plain text with a permissive CORS header so browser
    clients on other origins can call this endpoint.
    """
    launch_config = request.app['launch_config']
    hostname = await request.app['launcher'].launch(**launch_config)
    headers = {'Access-Control-Allow-Origin': '*'}
    return web.Response(text=hostname, headers=headers)
def create_app():
    """Assemble and return the fully configured aiohttp application."""
    application = web.Application()
    configure_app(application)
    add_routes(application)
    return application
def configure_app(app):
    """Attach the container launcher and its fixed launch configuration.

    NOTE(review): Client() connects to the local Docker daemon with default
    settings — confirm that is intended outside the workshop environment.
    """
    app['launcher'] = Launcher(docker_client=Client(), max_workers=4)
    # Image and network are hard-coded for the pyepics workshop IOC containers.
    app['launch_config'] = dict(image='pyepics-workshop-ioc',
                                network='pyepics-workshop',
                                tty=True)
def add_routes(app):
    """Register HTTP routes: POST /new spawns a new container."""
    app.router.add_post('/new', create_container_handler)
def main():
    """Entry point: build the application and serve it with aiohttp."""
    web.run_app(create_app())
|
# Generated by Django 3.1.3 on 2020-11-08 16:02
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add game start/end timestamps, a guest
    identifier, and won/lost counters on guest and user profiles."""
    dependencies = [
        ('core', '0003_auto_20201105_1100'),
    ]
    operations = [
        # Optional timestamps marking when a game ended/started.
        migrations.AddField(
            model_name='game',
            name='end_date',
            field=models.DateTimeField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='game',
            name='start_date',
            field=models.DateTimeField(blank=True, null=True),
        ),
        # Opaque identifier used to recognize returning guests.
        migrations.AddField(
            model_name='guestprofile',
            name='guest_id',
            field=models.CharField(blank=True, max_length=64, null=True),
        ),
        # Win/loss tallies, defaulting to zero for existing rows.
        migrations.AddField(
            model_name='guestprofile',
            name='lost',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='guestprofile',
            name='won',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='userprofile',
            name='lost',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='userprofile',
            name='won',
            field=models.IntegerField(default=0),
        ),
    ]
|
# -*- coding: utf-8 -*-
import itertools
import time
from flask import Flask, jsonify, request, render_template
import os
import io
import firebase_admin
from firebase_admin import db
from firebase_admin import credentials, firestore
from google.cloud import storage
from PIL import Image
import requests
from io import BytesIO
import urllib.request as req
from PIL import Image
import glob
import json
import base64
import threading
from datetime import datetime
# from sqlalchemy import create_engine
# from flask_mysqldb import MySQL
from products import Product
from users import User
from datetime import date
from ast import literal_eval
from decimal import Decimal
import pysftp
import sys
import uuid
app = Flask(__name__)
# --- Legacy MySQL configuration, superseded by Firebase (kept for reference) ---
# application.config['MYSQL_HOST'] = 'aad4ceauedwkx7.ctvp1qfizfsm.us-east-2.rds.amazonaws.com'
# application.config['MYSQL_USER'] = 'root'
# application.config['MYSQL_PASSWORD'] = '27031984As'
# application.config['MYSQL_DB'] = 'ebdb'
# mysql = MySQL(application)
# NOTE(review): service-account key file is referenced from the working
# directory and its name is committed in source — verify this is acceptable.
cred = credentials.Certificate("dom-marino-ws-firebase-adminsdk-x049u-1128490a39.json")
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "dom-marino-ws-firebase-adminsdk-x049u-1128490a39.json"
firebase_admin.initialize_app(cred)
# firebase_admin.initialize_app(cred, {
#     'databaseURL': 'https://dom-marino-ws.firebaseio.com/',
#     'storageBucket': 'dom-marino-ws.appspot.com'
# })
# Category selected for one-off maintenance scripts below (only one active).
# category = 'non_alcoholic_beverages'
# category = 'alcoholic_beverages'
# category = 'beers'
# category = 'candy_pizzas'
# category = 'flapts'
category = 'gourmet_pizzas'
# category = 'pizza_edges'
# category = 'traditional_pizzas'
# category = 'wines'
# Scratch variables used by maintenance/upload helpers.
document_id = ''
imageurl = ''
thumbnailurl = ''
client = storage.Client()
# https://console.cloud.google.com/storage/browser/[bucket-id]/
bucket = client.get_bucket('dom-marino-ws.appspot.com')
# Then do other things...
# blob = bucket.get_blob('categories/beers/beer_icon.png')
# # print(blob.download_as_string())
# blob.upload_from_file('pictures/products/')
# blob2 = bucket.blob('products/' + category + document_id)
# blob2.upload_from_filename(filename='teste.txt')
# imgurl ="https://i.pinimg.com/originals/68/7c/ec/687cec1f523e3ee2b666c38e055a4d6d.png"
# req.urlretrieve(imgurl, "soft_drinks.png")
# Firestore client and collection references. NOTE(review): this rebinds the
# name `db`, shadowing the firebase_admin `db` module imported above.
db = firestore.client()
todo_ref = db.collection('todos')
categories_ref = db.collection('categories')
users_ref = db.collection('users')
orders_ref = db.collection('orders')
# Product subcollections, one per menu category.
non_alcoholic_beverages_ref = db.collection('products').document('non_alcoholic_beverages').collection(
    'non_alcoholic_beverages')
alcoholic_beverages_ref = db.collection('products').document('alcoholic_beverages').collection('alcoholic_beverages')
beers_ref = db.collection('products').document('beers').collection('beers')
candy_pizzas_ref = db.collection('products').document('candy_pizzas').collection('candy_pizzas')
flapts_ref = db.collection('products').document('flapts').collection('flapts')
gourmet_pizzas_ref = db.collection('products').document('gourmet_pizzas').collection('gourmet_pizzas')
pizza_edges_ref = db.collection('products').document('pizza_edges').collection('pizza_edges')
traditional_pizzas_ref = db.collection('products').document('traditional_pizzas').collection('traditional_pizzas')
wines_ref = db.collection('products').document('wines').collection('wines')
promotions_ref = db.collection('products').document('promotions').collection('promotions')
two_flavored_pizzas_ref = db.collection('products').document('two_flavored_pizzas').collection('two_flavored_pizzas')
# NOTE(review): users_ref is assigned twice (also above) — harmless but redundant.
users_ref = db.collection('users')
working_hours_ref = db.collection('workinghours')
# get all the png files from the current folder
# for infile in glob.glob("*.png"):
# for infile in glob.glob("soft_drinks.png"):
# im = Image.open(infile)
# # don't save if thumbnail already exists
# if infile[0:2] != "T_":
# # prefix thumbnail file with T_
# im.save("T_" + infile, "PNG")
#
# data = {}
# with open('T_soft_drinks.png', mode='rb') as file:
# img = file.read()
# data['img'] = base64.encodebytes(img).decode("utf-8")
#
# print(json.dumps(data))
# Sample in-memory data used by early demo endpoints.
accounts = [
    {'name': "Billy", 'balance': 450.0},
    {'name': "Kelly", 'balance': 250.0}
]
# In-memory caches, each rebuilt by its Firestore snapshot listener below.
all_categories = []
all_non_alcoholic_beverages = []
all_alcoholic_beverages = []
all_beers = []
all_pizza_edges = []
all_flapts = []
all_candy_pizzas = []
all_gourmet_pizzas = []
all_traditional_pizzas = []
all_wines = []
all_promotions = []
all_two_flavored_pizzas = []
all_orders = []
all_users = []
# HTTP GET
# HTTP POST
# HTTP PUT
# HTTP DELETE
# HTTP PATCH
# @app.route('/heroes', methods=['POST'])
# def create_hero():
# req = request.json
# hero = SUPERHEROES.push(req)
# return jsonify({'id': hero.key}), 201
# Create a callback on_snapshot function to capture changes
def on_snapshot(doc_snapshot, changes, read_time):
    """Generic Firestore listener: log the id of every document in the snapshot."""
    for document in doc_snapshot:
        print(u'Received document snapshot: {}'.format(document.id))
def on_categories_snapshot(doc_snapshot, changes, read_time):
    """Firestore listener: rebuild the in-memory category cache from a snapshot."""
    global all_categories
    all_categories = [document.to_dict() for document in doc_snapshot]
def on_nab_snapshot(doc_snapshot, changes, read_time):
    """Firestore listener: rebuild the non-alcoholic beverages cache.

    Each product dict gains ``price_broto``/``price_inteira`` from the
    ``prices`` subcollection ('' when the price field is absent), an
    ``image`` URL (last document of ``images``), and a ``prices`` dict
    keyed by price-document id.
    """
    global all_non_alcoholic_beverages
    products = []
    for doc in doc_snapshot:
        product = doc.to_dict()
        product_ref = non_alcoholic_beverages_ref.document(doc.id)
        prices_col = product_ref.collection('prices')
        # Fix: .to_dict() is None when a size document is missing, which
        # previously raised AttributeError; fall back to an empty dict.
        for size in ('broto', 'inteira'):
            size_data = prices_col.document(size).get().to_dict() or {}
            product['price_' + size] = size_data.get('price', '')
        for image in product_ref.collection('images').stream():
            product['image'] = image.to_dict().get('url', '')
        product['prices'] = {p.id: p.to_dict() for p in prices_col.stream()}
        products.append(product)
    # Swap in the rebuilt list atomically so readers never see a partial cache.
    all_non_alcoholic_beverages = products
def on_ab_snapshot(doc_snapshot, changes, read_time):
    """Firestore listener: rebuild the alcoholic beverages cache.

    Each product dict gains ``price_broto``/``price_inteira`` from the
    ``prices`` subcollection ('' when absent), an ``image`` URL, and a
    ``prices`` dict keyed by price-document id.
    """
    global all_alcoholic_beverages
    products = []
    for doc in doc_snapshot:
        product = doc.to_dict()
        product_ref = alcoholic_beverages_ref.document(doc.id)
        prices_col = product_ref.collection('prices')
        # Fix: .to_dict() is None when a size document is missing, which
        # previously raised AttributeError; fall back to an empty dict.
        for size in ('broto', 'inteira'):
            size_data = prices_col.document(size).get().to_dict() or {}
            product['price_' + size] = size_data.get('price', '')
        for image in product_ref.collection('images').stream():
            product['image'] = image.to_dict().get('url', '')
        product['prices'] = {p.id: p.to_dict() for p in prices_col.stream()}
        products.append(product)
    # Swap in the rebuilt list atomically so readers never see a partial cache.
    all_alcoholic_beverages = products
def on_beers_snapshot(doc_snapshot, changes, read_time):
    """Firestore listener: rebuild the beers cache.

    Each product dict gains ``price_broto``/``price_inteira`` from the
    ``prices`` subcollection ('' when absent), an ``image`` URL, and a
    ``prices`` dict keyed by price-document id.
    """
    global all_beers
    products = []
    for doc in doc_snapshot:
        product = doc.to_dict()
        product_ref = beers_ref.document(doc.id)
        prices_col = product_ref.collection('prices')
        # Fix: .to_dict() is None when a size document is missing, which
        # previously raised AttributeError; fall back to an empty dict.
        for size in ('broto', 'inteira'):
            size_data = prices_col.document(size).get().to_dict() or {}
            product['price_' + size] = size_data.get('price', '')
        for image in product_ref.collection('images').stream():
            product['image'] = image.to_dict().get('url', '')
        product['prices'] = {p.id: p.to_dict() for p in prices_col.stream()}
        products.append(product)
    # Swap in the rebuilt list atomically so readers never see a partial cache.
    all_beers = products
def on_candy_pizzas_snapshot(doc_snapshot, changes, read_time):
    """Firestore listener: rebuild the candy pizzas cache.

    Each product dict gains ``price_broto``/``price_inteira`` from the
    ``prices`` subcollection ('' when absent), an ``image`` URL, and a
    ``prices`` dict keyed by price-document id.
    """
    global all_candy_pizzas
    products = []
    for doc in doc_snapshot:
        product = doc.to_dict()
        product_ref = candy_pizzas_ref.document(doc.id)
        prices_col = product_ref.collection('prices')
        # Fix: .to_dict() is None when a size document is missing, which
        # previously raised AttributeError; fall back to an empty dict.
        for size in ('broto', 'inteira'):
            size_data = prices_col.document(size).get().to_dict() or {}
            product['price_' + size] = size_data.get('price', '')
        for image in product_ref.collection('images').stream():
            product['image'] = image.to_dict().get('url', '')
        product['prices'] = {p.id: p.to_dict() for p in prices_col.stream()}
        products.append(product)
    # Swap in the rebuilt list atomically so readers never see a partial cache.
    all_candy_pizzas = products
def on_flapts_snapshot(doc_snapshot, changes, read_time):
    """Firestore listener: rebuild the flapts cache.

    Each product dict gains ``price_broto``/``price_inteira`` from the
    ``prices`` subcollection ('' when absent), an ``image`` URL, and a
    ``prices`` dict keyed by price-document id.
    """
    global all_flapts
    products = []
    for doc in doc_snapshot:
        product = doc.to_dict()
        product_ref = flapts_ref.document(doc.id)
        prices_col = product_ref.collection('prices')
        # Fix: .to_dict() is None when a size document is missing, which
        # previously raised AttributeError; fall back to an empty dict.
        for size in ('broto', 'inteira'):
            size_data = prices_col.document(size).get().to_dict() or {}
            product['price_' + size] = size_data.get('price', '')
        for image in product_ref.collection('images').stream():
            product['image'] = image.to_dict().get('url', '')
        product['prices'] = {p.id: p.to_dict() for p in prices_col.stream()}
        products.append(product)
    # Swap in the rebuilt list atomically so readers never see a partial cache.
    all_flapts = products
def on_pizza_edges_snapshot(doc_snapshot, changes, read_time):
    """Firestore listener: rebuild the pizza edges cache.

    Each product dict gains ``price_broto``/``price_inteira`` from the
    ``prices`` subcollection ('' when absent), an ``image`` URL, and a
    ``prices`` dict keyed by price-document id.
    """
    global all_pizza_edges
    products = []
    for doc in doc_snapshot:
        product = doc.to_dict()
        product_ref = pizza_edges_ref.document(doc.id)
        prices_col = product_ref.collection('prices')
        # Fix: .to_dict() is None when a size document is missing, which
        # previously raised AttributeError; fall back to an empty dict.
        for size in ('broto', 'inteira'):
            size_data = prices_col.document(size).get().to_dict() or {}
            product['price_' + size] = size_data.get('price', '')
        for image in product_ref.collection('images').stream():
            product['image'] = image.to_dict().get('url', '')
        product['prices'] = {p.id: p.to_dict() for p in prices_col.stream()}
        products.append(product)
    # Swap in the rebuilt list atomically so readers never see a partial cache.
    all_pizza_edges = products
def on_traditional_pizzas_snapshot(doc_snapshot, changes, read_time):
    """Firestore listener: rebuild the traditional pizzas cache.

    Each product dict gains ``price_broto``/``price_inteira`` from the
    ``prices`` subcollection ('' when absent), an ``image`` URL, and a
    ``prices`` dict keyed by price-document id.
    """
    global all_traditional_pizzas
    products = []
    for doc in doc_snapshot:
        product = doc.to_dict()
        product_ref = traditional_pizzas_ref.document(doc.id)
        prices_col = product_ref.collection('prices')
        # Fix: .to_dict() is None when a size document is missing, which
        # previously raised AttributeError; fall back to an empty dict.
        for size in ('broto', 'inteira'):
            size_data = prices_col.document(size).get().to_dict() or {}
            product['price_' + size] = size_data.get('price', '')
        for image in product_ref.collection('images').stream():
            product['image'] = image.to_dict().get('url', '')
        product['prices'] = {p.id: p.to_dict() for p in prices_col.stream()}
        products.append(product)
    # Swap in the rebuilt list atomically so readers never see a partial cache.
    all_traditional_pizzas = products
def on_gourmet_pizzas_snapshot(doc_snapshot, changes, read_time):
    """Firestore listener: rebuild the gourmet pizzas cache.

    Each product dict gains ``price_broto``/``price_inteira`` from the
    ``prices`` subcollection ('' when absent), an ``image`` URL, and a
    ``prices`` dict keyed by price-document id.
    """
    global all_gourmet_pizzas
    products = []
    for doc in doc_snapshot:
        product = doc.to_dict()
        product_ref = gourmet_pizzas_ref.document(doc.id)
        prices_col = product_ref.collection('prices')
        # Fix: .to_dict() is None when a size document is missing, which
        # previously raised AttributeError; fall back to an empty dict.
        for size in ('broto', 'inteira'):
            size_data = prices_col.document(size).get().to_dict() or {}
            product['price_' + size] = size_data.get('price', '')
        for image in product_ref.collection('images').stream():
            product['image'] = image.to_dict().get('url', '')
        product['prices'] = {p.id: p.to_dict() for p in prices_col.stream()}
        products.append(product)
    # Swap in the rebuilt list atomically so readers never see a partial cache.
    all_gourmet_pizzas = products
def on_wines_snapshot(doc_snapshot, changes, read_time):
    """Firestore listener: rebuild the wines cache.

    Each product dict gains ``price_broto``/``price_inteira`` from the
    ``prices`` subcollection ('' when absent), an ``image`` URL, and a
    ``prices`` dict keyed by price-document id.
    """
    global all_wines
    products = []
    for doc in doc_snapshot:
        product = doc.to_dict()
        product_ref = wines_ref.document(doc.id)
        prices_col = product_ref.collection('prices')
        # Fix: .to_dict() is None when a size document is missing, which
        # previously raised AttributeError; fall back to an empty dict.
        for size in ('broto', 'inteira'):
            size_data = prices_col.document(size).get().to_dict() or {}
            product['price_' + size] = size_data.get('price', '')
        for image in product_ref.collection('images').stream():
            product['image'] = image.to_dict().get('url', '')
        product['prices'] = {p.id: p.to_dict() for p in prices_col.stream()}
        products.append(product)
    # Swap in the rebuilt list atomically so readers never see a partial cache.
    all_wines = products
def on_promotions_snapshot(doc_snapshot, changes, read_time):
    """Firestore listener: rebuild the promotions cache.

    Each product dict gains ``price_broto``/``price_inteira`` from the
    ``prices`` subcollection ('' when absent), an ``image`` URL, and a
    ``prices`` dict keyed by price-document id.
    """
    global all_promotions
    products = []
    for doc in doc_snapshot:
        product = doc.to_dict()
        product_ref = promotions_ref.document(doc.id)
        prices_col = product_ref.collection('prices')
        # Fix: .to_dict() is None when a size document is missing, which
        # previously raised AttributeError; fall back to an empty dict.
        for size in ('broto', 'inteira'):
            size_data = prices_col.document(size).get().to_dict() or {}
            product['price_' + size] = size_data.get('price', '')
        for image in product_ref.collection('images').stream():
            product['image'] = image.to_dict().get('url', '')
        product['prices'] = {p.id: p.to_dict() for p in prices_col.stream()}
        products.append(product)
    # Swap in the rebuilt list atomically so readers never see a partial cache.
    all_promotions = products
def on_two_flavored_pizzas_snapshot(doc_snapshot, changes, read_time):
    """Firestore listener: rebuild the in-memory ``all_two_flavored_pizzas`` cache.

    For every pizza document, attaches the (last) image URL, the
    'broto'/'inteira' prices and the full per-size price map.
    """
    global all_two_flavored_pizzas
    all_two_flavored_pizzas = []
    for doc in doc_snapshot:
        product = doc.to_dict()
        doc_ref = two_flavored_pizzas_ref.document(doc.id)
        images = doc_ref.collection('images').stream()
        prices = doc_ref.collection('prices').stream()
        # .to_dict() returns None when the size document does not exist;
        # the original crashed with AttributeError in that case.
        broto_doc = doc_ref.collection('prices').document('broto').get().to_dict()
        inteira_doc = doc_ref.collection('prices').document('inteira').get().to_dict()
        product.update({'price_broto': broto_doc.get('price', '') if broto_doc else None})
        product.update({'price_inteira': inteira_doc.get('price', '') if inteira_doc else None})
        # only the last image URL is kept (matches previous behaviour)
        for image in images:
            product.update({'image': image.to_dict().get('url', '')})
        product.update({'prices': {size.id: size.to_dict() for size in prices}})
        all_two_flavored_pizzas.append(product)
def on_users_snapshot(doc_snapshot, changes, read_time):
    """Firestore listener: refresh the in-memory ``all_users`` cache."""
    global all_users
    all_users = [snapshot.to_dict() for snapshot in doc_snapshot]
# Watch the document
# Register one real-time snapshot listener per collection; each callback
# above rebuilds its corresponding in-memory cache (all_categories, ...).
# The returned Watch objects are kept so monitor_watches() can restart them.
cat_watch = categories_ref.on_snapshot(on_categories_snapshot)
nab_watch = non_alcoholic_beverages_ref.on_snapshot(on_nab_snapshot)
ab_watch = alcoholic_beverages_ref.on_snapshot(on_ab_snapshot)
beers_watch = beers_ref.on_snapshot(on_beers_snapshot)
candy_pizzas_watch = candy_pizzas_ref.on_snapshot(on_candy_pizzas_snapshot)
flapts_watch = flapts_ref.on_snapshot(on_flapts_snapshot)
pizza_edges_watch = pizza_edges_ref.on_snapshot(on_pizza_edges_snapshot)
traditional_pizzas_watch = traditional_pizzas_ref.on_snapshot(on_traditional_pizzas_snapshot)
gourmet_pizzas_watch = gourmet_pizzas_ref.on_snapshot(on_gourmet_pizzas_snapshot)
wines_watch = wines_ref.on_snapshot(on_wines_snapshot)
promotions_watch = promotions_ref.on_snapshot(on_promotions_snapshot)
two_flavored_pizzas_watch = two_flavored_pizzas_ref.on_snapshot(on_two_flavored_pizzas_snapshot)
users_watch = users_ref.on_snapshot(on_users_snapshot)
def monitor_watches():
    """Re-register any Firestore snapshot listener that has been closed.

    Reschedules itself every 30 seconds via threading.Timer.  NOTE(review):
    it reads the private ``_closed`` attribute of the Watch objects — this
    is not public API and may break on client-library upgrades; confirm
    against the installed google-cloud-firestore version.
    """
    global cat_watch
    global nab_watch
    global ab_watch
    global beers_watch
    global candy_pizzas_watch
    global flapts_watch
    global pizza_edges_watch
    global traditional_pizzas_watch
    global gourmet_pizzas_watch
    global wines_watch
    global promotions_watch
    global two_flavored_pizzas_watch
    global users_watch
    # schedule the next check before doing the work, so one failure below
    # does not stop the monitoring loop
    threading.Timer(30.0, monitor_watches).start()
    if cat_watch._closed:
        cat_watch = categories_ref.on_snapshot(on_categories_snapshot)
    if nab_watch._closed:
        nab_watch = non_alcoholic_beverages_ref.on_snapshot(on_nab_snapshot)
    if ab_watch._closed:
        ab_watch = alcoholic_beverages_ref.on_snapshot(on_ab_snapshot)
    if beers_watch._closed:
        beers_watch = beers_ref.on_snapshot(on_beers_snapshot)
    if candy_pizzas_watch._closed:
        candy_pizzas_watch = candy_pizzas_ref.on_snapshot(on_candy_pizzas_snapshot)
    if flapts_watch._closed:
        flapts_watch = flapts_ref.on_snapshot(on_flapts_snapshot)
    if pizza_edges_watch._closed:
        pizza_edges_watch = pizza_edges_ref.on_snapshot(on_pizza_edges_snapshot)
    if traditional_pizzas_watch._closed:
        traditional_pizzas_watch = traditional_pizzas_ref.on_snapshot(on_traditional_pizzas_snapshot)
    if gourmet_pizzas_watch._closed:
        gourmet_pizzas_watch = gourmet_pizzas_ref.on_snapshot(on_gourmet_pizzas_snapshot)
    if wines_watch._closed:
        wines_watch = wines_ref.on_snapshot(on_wines_snapshot)
    if promotions_watch._closed:
        promotions_watch = promotions_ref.on_snapshot(on_promotions_snapshot)
    if two_flavored_pizzas_watch._closed:
        two_flavored_pizzas_watch = two_flavored_pizzas_ref.on_snapshot(on_two_flavored_pizzas_snapshot)
    if users_watch._closed:
        users_watch = users_ref.on_snapshot(on_users_snapshot)
# kick off the 30-second monitoring loop
monitor_watches()
def setImageUrl(url):
    """Store *url* in the module-level ``imageurl`` global."""
    global imageurl
    imageurl = url
@app.route("/")
def home():
return render_template("index.html")
@app.route("/accounts", methods=["GET"])
def getAccounts():
return jsonify(accounts)
# @app.route("/img", methods=["GET"])
# def getImages():
# return json.dumps(data)
@app.route("/account/<id>", methods=["GET"])
def getAccount(id):
id = int(id) - 1
return jsonify(accounts[id])
@app.route("/add", methods=['GET', 'POST'])
def create():
"""
create() : Add document to Firestore collection with request body
Ensure you pass a custom ID as part of json body in post request
e.g. json={'id': '1', 'title': 'Write a blog post'}
"""
try:
data = {
u'name': u'Los Angeles',
u'state': u'CA',
u'country': u'USA'
}
# id = request.json['id']
id = todo_ref.document().id
user = User(uid=u'Tokyo', register_date=u'21/09/2019', main_address_id=u'main_address_id', image=u'image',
name=u'Tokyo', phone=None, email=u'Japan')
todo_ref.add(user.to_dict())
# todo_ref.document(id).set(request.json)
# todo_ref.document(id).set(data)
# todo_ref.add(data)
return jsonify({"success": True}), 200
except Exception as e:
return f"An Error Occured: {e}"
@app.route("/makeorder", methods=['POST'])
def makeorder():
# dd/mm/YY
# today = datetime.now()
# # today = today.strftime("%d-%m-%Y")
# today = today.strftime("%Y-%m-%d %H:%M:%S")
today = request.get_json().get('date_time')
startdata = {
u'id': u'{0}'.format(today[:-9])
}
thisOrderRef = orders_ref.document(today[:-9])
thisOrderRef.set(startdata)
thisOrderRef = thisOrderRef.collection(today[:-9])
order_ref_for_update = thisOrderRef
# print("hoje é: {0}".format(today))
try:
coupon_id = request.get_json().get('coupon_id')
delivery = request.get_json().get('delivery')
payment_method = request.get_json().get('payment_method')
payment_change = request.get_json().get('payment_change')
delivery_address = request.get_json().get('delivery_address')
total = request.get_json().get('total')
userId = request.get_json().get('userId')
id = thisOrderRef.document().id
products_id = request.get_json().get('products_id')
# print(products_id)
data = {
u'coupon_id': u'{}'.format(coupon_id),
u'dateTime': u'{}'.format(today),
u'id': u'{}'.format(id),
u'delivery': u'{}'.format(delivery),
u'payment_method': u'{}'.format(payment_method),
u'payment_change': u'{}'.format(payment_change),
u'delivery_address': u'{}'.format(delivery_address),
u'total': u'{}'.format(total),
u'userId': u'{}'.format(userId)
}
thisOrderRef.document(id).set(data)
thisOrderRef = thisOrderRef.document(id).collection('products_id')
#product.update({'price_broto': None})
# product_dict = literal_eval(products_id)
json_acceptable_string = products_id.replace("'", "\"")
product_dict = json.loads(json_acceptable_string)
# print(product_dict)
total_paid = Decimal('0.00')
for key, value in product_dict.items():
product = value
thisId = thisOrderRef.document().id
paid_price = 0.00
pizza_edge_price = 0.00
pizza_edge_description = ""
product_description = ""
img_url = ""
all_items = []
if product.get("isTwoFlavoredPizza") == 0:
if product.get("product1_category") == "beers":
all_items.extend(all_beers)
elif product.get("product1_category") == "alcoholic_beverages":
all_items.extend(all_alcoholic_beverages)
elif product.get("product1_category") == "flapts":
all_items.extend(all_flapts)
elif product.get("product1_category") == "non_alcoholic_beverages":
all_items.extend(all_non_alcoholic_beverages)
elif product.get("product1_category") == "promotions":
all_items.extend(all_promotions)
elif product.get("product1_category") == "wines":
all_items.extend(all_wines)
elif product.get("product1_category") == "candy_pizzas":
all_items.extend(all_candy_pizzas)
elif product.get("product1_category") == "gourmet_pizzas":
all_items.extend(all_gourmet_pizzas)
elif product.get("product1_category") == "traditional_pizzas":
all_items.extend(all_traditional_pizzas)
if "pizza" not in product.get("product1_category"):
for item in all_items:
if item.get('id') == product.get("product_id"):
paid_price = item.get("price")
product_description = item.get('description')
img_url = item.get('image')
else:
if product.get("pizza_edge_id") != "null":
for pizza_edge in all_pizza_edges:
if pizza_edge.get('id') == product.get("pizza_edge_id"):
pizza_edge_description = pizza_edge.get("description")
if product.get("size") == "Broto":
pizza_edge_price = pizza_edge.get("price_broto")
if product.get("size") == "Inteira":
pizza_edge_price = pizza_edge.get("price_inteira")
for item in all_items:
if item.get('id') == product.get("product_id"):
product_description = item.get('description')
img_url = item.get('image')
if product.get("size") == "Broto":
paid_price = item.get("price_broto")
if product.get("size") == "Inteira":
paid_price = item.get("price_inteira")
new_price = Decimal(paid_price)+Decimal(pizza_edge_price)
paid_price = round(new_price, 2)
else:
product1_price = 0.00
product2_price = 0.00
if product.get("pizza_edge_id") != "null":
for pizza_edge in all_pizza_edges:
if pizza_edge.get('id') == product.get("pizza_edge_id"):
pizza_edge_description = pizza_edge.get("description")
if product.get("size") == "Broto":
pizza_edge_price = pizza_edge.get("price_broto")
if product.get("size") == "Inteira":
pizza_edge_price = pizza_edge.get("price_inteira")
if product.get("product1_category") == "traditional_pizzas":
all_items.extend(all_traditional_pizzas)
elif product.get("product1_category") == "gourmet_pizzas":
all_items.extend(all_gourmet_pizzas)
elif product.get("product1_category") == "candy_pizzas":
all_items.extend(all_candy_pizzas)
for product1 in all_items:
if product1.get('id') == product.get("product_id"):
product_description = product1.get('description')
img_url = "https://storage.googleapis.com/dom-marino-ws.appspot.com/categories/custom/two_flavored_pizza_image.png"
if product.get("size") == "Broto":
product1_price = product1.get("price_broto")
if product.get("size") == "Inteira":
product1_price = product1.get("price_inteira")
all_items = []
if product.get("product2_category") == "traditional_pizzas":
all_items.extend(all_traditional_pizzas)
elif product.get("product2_category") == "gourmet_pizzas":
all_items.extend(all_gourmet_pizzas)
elif product.get("product2_category") == "candy_pizzas":
all_items.extend(all_candy_pizzas)
for product2 in all_items:
if product2.get('id') == product.get("product2_id"):
product_description += " + "+product2.get('description')
if product.get("size") == "Broto":
product2_price = product2.get("price_broto")
if product.get("size") == "Inteira":
product2_price = product2.get("price_inteira")
product1_decimal_price = Decimal(product1_price)
product2_decimal_price = Decimal(product2_price)
max_price = max(product1_decimal_price, product2_decimal_price)
pizza_edge_decimal_price = Decimal(pizza_edge_price)
max_price_decimal = Decimal(max_price)
new_price = max_price_decimal+pizza_edge_decimal_price
paid_price = new_price
thisProduct = {
u'category': u'{}'.format(product.get("category")),
u'notes': u'{}'.format(product.get("notes")),
u'id': u'{}'.format(thisId),
u'paid_price': u'{}'.format(paid_price),
u'pizza_edge_id': u'{}'.format(product.get("pizza_edge_id")),
u'pizza_edge_description': u'{}'.format(pizza_edge_description),
u'pizza_edge_paid_price': u'{}'.format(pizza_edge_price),
u'product1_category': u'{}'.format(product.get("product1_category")),
u'product2_category': u'{}'.format(product.get("product2_category")),
u'product2_id': u'{}'.format(product.get("product2_id")),
u'product_description': u'{}'.format(product_description),
u'product_id': u'{}'.format(product.get("product_id")),
u'product_image_url': u'{}'.format(img_url),
u'quantity': u'{}'.format(product.get("quantity")),
u'isTwoFlavoredPizza': u'{}'.format(product.get("isTwoFlavoredPizza")),
u'size': u'{}'.format(product.get("size"))
}
total_paid += Decimal(paid_price)*Decimal(product.get("quantity"))
thisOrderRef.document(thisId).set(thisProduct)
delivery_tax_ref_snapshot = db.collection('delivery_tax').document('current_tax').get()
tax = delivery_tax_ref_snapshot.to_dict()['value']
if delivery_address.lower() != "retirada":
total_paid += Decimal(tax)
order_ref_for_update.document(id).update({u'total': str(round(total_paid, 2))})
return jsonify({"success": True}), 200
except Exception as e:
return f"An Error Occured: {e}"
@app.route('/get_working_hours', methods=['GET'])
def get_working_hours():
    """Return the working-hours document for the ``weekDay`` query arg."""
    snapshot = working_hours_ref.document(request.args.get('weekDay')).get()
    return jsonify(snapshot.to_dict()), 200
@app.route('/list_user_orders', methods=['GET'])
def list_user_orders():
    """
    Return every order placed by the user given in the ``id`` query arg,
    with each order's ``products_id`` sub-collection inlined.

    The original performed all the Firestore streaming *outside* the try
    block, so any backend failure bypassed the endpoint's error convention
    and surfaced as an unhandled 500.
    """
    try:
        all_orders = []
        user_id = request.args.get('id')
        # orders are grouped per day: orders/<date>/<date>/<order-id>
        for day in orders_ref.stream():
            day_col = orders_ref.document(day.id).collection(day.id)
            for order in day_col.where(u'userId', u'==', user_id).stream():
                this_order = order.to_dict()
                products = {
                    product.id: product.to_dict()
                    for product in day_col.document(order.id).collection("products_id").stream()
                }
                this_order.update({"products_id": products})
                all_orders.append(this_order)
        return jsonify(all_orders), 200
    except Exception as e:
        return f"An Error Occured: {e}"
@app.route('/list_categories', methods=['GET'])
def list_categories():
    """
    Return all cached categories, or one when an ``id`` query arg is given.
    """
    # block until the snapshot listener has populated the cache
    while len(all_categories) == 0:
        time.sleep(1)
    try:
        cat_id = request.args.get('id')
        if cat_id:
            for element in all_categories:
                if element['id'] == cat_id:
                    return jsonify(element), 200
            # the original bound the sentinel `object` class on a miss and
            # jsonify(object) always raised; report the miss explicitly
            return jsonify(None), 404
        else:
            return jsonify(all_categories), 200
    except Exception as e:
        return f"An Error Occured: {e}"
@app.route('/create_user', methods=['GET', 'POST'])
def create_user():
    """Create/overwrite a user document from form data.

    When ``hasImageFile`` is "True", the uploaded avatar is pushed to the
    web server over SFTP and its public URL stored; otherwise a default
    avatar URL is used when none was supplied.
    """
    uid = request.form['uid']
    name = request.form['name']
    email = request.form['email']
    phone = request.form['phone']
    street = request.form['street']
    streetNumber = request.form['streetNumber']
    neighborhood = request.form['neighborhood']
    city = request.form['city']
    imgUrl = request.form['img_url']
    isRegisterComplete = request.form['isRegisterComplete']
    if request.form['hasImageFile'] == "True":
        image = request.files['image_file'].read()
        print('imagem não é nula', file=sys.stdout, flush=True)
        # SECURITY: hardcoded credentials and disabled host-key checking
        # (cnopts.hostkeys = None) — move to config/secrets and pin the key.
        cnopts = pysftp.CnOpts()
        cnopts.hostkeys = None
        with pysftp.Connection(host='157.230.167.73', username='root', password='27031984As', cnopts=cnopts) as sftp:
            print("Connection succesfully stablished ... ")
            # ensure the per-user remote directory exists
            if not sftp.isdir('/var/www/powermemes.com/htdocs/dommarino/userimg/{}'.format(uid)):
                sftp.mkdir('/var/www/powermemes.com/htdocs/dommarino/userimg/{}'.format(uid))
            sftp.cwd('/var/www/powermemes.com/htdocs/dommarino/userimg/{}'.format(uid))
            img_id = str(uuid.uuid1())
            print('imge id={}'.format(img_id))
            # the original leaked this remote file handle; close it so the
            # write is flushed before the connection is torn down
            with sftp.open('/var/www/powermemes.com/htdocs/dommarino/userimg/{0}/{1}.png'.format(uid, img_id), 'wb') as f:
                f.write(image)
            imgUrl = "https://powermemes.com/dommarino/userimg/{0}/{1}.png".format(uid, img_id)
    elif imgUrl=="":
        # fall back to the shared default avatar
        imgUrl="https://powermemes.com/dommarino/userimg/avatar.png"
    # everything is persisted as strings in Firestore
    data = {
        u'uid': u'{}'.format(uid),
        u'name': u'{}'.format(name),
        u'email': u'{}'.format(email),
        u'phone': u'{}'.format(phone),
        u'street': u'{}'.format(street),
        u'streetNumber': u'{}'.format(streetNumber),
        u'neighborhood': u'{}'.format(neighborhood),
        u'city': u'{}'.format(city),
        u'image_url': u'{}'.format(imgUrl),
        u'isRegisterComplete': u'{}'.format(isRegisterComplete),
    }
    users_ref.document(uid).set(data)
    print(data)
    return jsonify({"success": True}), 200
@app.route('/list_users', methods=['GET'])
def list_users():
    """
    Return every cached user, or a single user fetched straight from
    Firestore when a ``uid`` query arg is supplied.
    """
    try:
        user_id = request.args.get('uid')
        if not user_id:
            return jsonify(all_users), 200
        snapshot = users_ref.document(user_id).get()
        return jsonify(snapshot.to_dict()), 200
    except Exception as e:
        return f"An Error Occured: {e}"
@app.route('/list_non_alcoholic_beverages', methods=['GET'])
def list_non_alcoholic_beverages():
    """
    Return all cached non-alcoholic beverages, or one when ``id`` is given.
    """
    # block until the snapshot listener has populated the cache
    while len(all_non_alcoholic_beverages) == 0:
        time.sleep(1)
    try:
        nab_id = request.args.get('id')
        if nab_id:
            for element in all_non_alcoholic_beverages:
                if element['id'] == nab_id:
                    return jsonify(element), 200
            # the original bound the sentinel `object` class on a miss and
            # jsonify(object) always raised; report the miss explicitly
            return jsonify(None), 404
        else:
            return jsonify(all_non_alcoholic_beverages), 200
    except Exception as e:
        return f"An Error Occured: {e}"
@app.route('/list_alcoholic_beverages', methods=['GET'])
def list_alcoholic_beverages():
    """
    Return all cached alcoholic beverages, or one when ``id`` is given.
    """
    # block until the snapshot listener has populated the cache
    while len(all_alcoholic_beverages) == 0:
        time.sleep(1)
    try:
        ab_id = request.args.get('id')
        if ab_id:
            for element in all_alcoholic_beverages:
                if element['id'] == ab_id:
                    return jsonify(element), 200
            # the original bound the sentinel `object` class on a miss and
            # jsonify(object) always raised; report the miss explicitly
            return jsonify(None), 404
        else:
            return jsonify(all_alcoholic_beverages), 200
    except Exception as e:
        return f"An Error Occured: {e}"
@app.route('/list_beers', methods=['GET'])
def list_beers():
    """
    Return all cached beers, or one when an ``id`` query arg is given.
    """
    # block until the snapshot listener has populated the cache
    while len(all_beers) == 0:
        time.sleep(1)
    try:
        beer_id = request.args.get('id')
        if beer_id:
            for element in all_beers:
                if element['id'] == beer_id:
                    return jsonify(element), 200
            # the original bound the sentinel `object` class on a miss and
            # jsonify(object) always raised; report the miss explicitly
            return jsonify(None), 404
        else:
            return jsonify(all_beers), 200
    except Exception as e:
        return f"An Error Occured: {e}"
@app.route('/list_candy_pizzas', methods=['GET'])
def list_candy_pizzas():
    """
    Return all cached candy pizzas, or one when an ``id`` query arg is given.
    """
    # block until the snapshot listener has populated the cache
    while len(all_candy_pizzas) == 0:
        time.sleep(1)
    try:
        candypizza_id = request.args.get('id')
        if candypizza_id:
            for element in all_candy_pizzas:
                if element['id'] == candypizza_id:
                    return jsonify(element), 200
            # the original bound the sentinel `object` class on a miss and
            # jsonify(object) always raised; report the miss explicitly
            return jsonify(None), 404
        else:
            return jsonify(all_candy_pizzas), 200
    except Exception as e:
        return f"An Error Occured: {e}"
@app.route('/list_flapts', methods=['GET'])
def list_flapts():
    """
    Return all cached flapts, or one when an ``id`` query arg is given.
    """
    # block until the snapshot listener has populated the cache
    while len(all_flapts) == 0:
        time.sleep(1)
    try:
        flapts_id = request.args.get('id')
        if flapts_id:
            for element in all_flapts:
                if element['id'] == flapts_id:
                    return jsonify(element), 200
            # the original bound the sentinel `object` class on a miss and
            # jsonify(object) always raised; report the miss explicitly
            return jsonify(None), 404
        else:
            return jsonify(all_flapts), 200
    except Exception as e:
        return f"An Error Occured: {e}"
@app.route('/list_pizza_edges', methods=['GET'])
def list_pizza_edges():
    """
    Return all cached pizza edges, or one when an ``id`` query arg is given.
    """
    # block until the snapshot listener has populated the cache
    while len(all_pizza_edges) == 0:
        time.sleep(1)
    try:
        pizza_edge_id = request.args.get('id')
        if pizza_edge_id:
            for element in all_pizza_edges:
                if element['id'] == pizza_edge_id:
                    return jsonify(element), 200
            # the original bound the sentinel `object` class on a miss and
            # jsonify(object) always raised; report the miss explicitly
            return jsonify(None), 404
        else:
            return jsonify(all_pizza_edges), 200
    except Exception as e:
        return f"An Error Occured: {e}"
@app.route('/list_products', methods=['GET'])
def list_products():
    """
    Return the products of the category given by ``category_id``; when an
    ``id`` query arg is also present, return only the matching product.
    """
    try:
        product_id = request.args.get('id')
        category_id = request.args.get('category_id')
        # lambdas so each poll re-reads the module global, which the
        # snapshot listeners REBIND (not mutate) when data arrives —
        # capturing the list object directly would wait forever
        catalogs = {
            "beers": lambda: all_beers,
            "alcoholic_beverages": lambda: all_alcoholic_beverages,
            "flapts": lambda: all_flapts,
            "non_alcoholic_beverages": lambda: all_non_alcoholic_beverages,
            "promotions": lambda: all_promotions,
            "wines": lambda: all_wines,
            "candy_pizzas": lambda: all_candy_pizzas,
            "gourmet_pizzas": lambda: all_gourmet_pizzas,
            "traditional_pizzas": lambda: all_traditional_pizzas,
            "pizza_edges": lambda: all_pizza_edges,
            "two_flavored_pizzas": lambda: all_two_flavored_pizzas,
        }
        all_items = []
        getter = catalogs.get(category_id)
        if getter is not None:
            # block until the category cache has been populated
            while len(getter()) == 0:
                time.sleep(1)
            all_items.extend(getter())
        if product_id:
            for element in all_items:
                if element['id'] == product_id:
                    return jsonify(element), 200
            # the original bound the sentinel `object` class on a miss and
            # jsonify(object) always raised; report the miss explicitly
            return jsonify(None), 404
        else:
            return jsonify(all_items), 200
    except Exception as e:
        return f"An Error Occured: {e}"
@app.route('/list_traditional_pizzas', methods=['GET'])
def list_traditional_pizzas():
    """
    Return all cached traditional pizzas, or one when ``id`` is given.
    """
    # block until the snapshot listener has populated the cache
    while len(all_traditional_pizzas) == 0:
        time.sleep(1)
    try:
        trad_pizza_id = request.args.get('id')
        if trad_pizza_id:
            for element in all_traditional_pizzas:
                if element['id'] == trad_pizza_id:
                    return jsonify(element), 200
            # the original bound the sentinel `object` class on a miss and
            # jsonify(object) always raised; report the miss explicitly
            return jsonify(None), 404
        else:
            return jsonify(all_traditional_pizzas), 200
    except Exception as e:
        return f"An Error Occured: {e}"
@app.route('/list_gourmet_pizzas', methods=['GET'])
def list_gourmet_pizzas():
    """
    Return all cached gourmet pizzas, or one when ``id`` is given.
    """
    # block until the snapshot listener has populated the cache
    while len(all_gourmet_pizzas) == 0:
        time.sleep(1)
    try:
        gourmet_pizza_id = request.args.get('id')
        if gourmet_pizza_id:
            for element in all_gourmet_pizzas:
                if element['id'] == gourmet_pizza_id:
                    return jsonify(element), 200
            # the original bound the sentinel `object` class on a miss and
            # jsonify(object) always raised; report the miss explicitly
            return jsonify(None), 404
        else:
            return jsonify(all_gourmet_pizzas), 200
    except Exception as e:
        return f"An Error Occured: {e}"
@app.route('/list_wines', methods=['GET'])
def list_wines():
    """
    Return all cached wines, or one when an ``id`` query arg is given.
    """
    # block until the snapshot listener has populated the cache
    while len(all_wines) == 0:
        time.sleep(1)
    try:
        wine_id = request.args.get('id')
        if wine_id:
            for element in all_wines:
                if element['id'] == wine_id:
                    return jsonify(element), 200
            # the original bound the sentinel `object` class on a miss and
            # jsonify(object) always raised; report the miss explicitly
            return jsonify(None), 404
        else:
            return jsonify(all_wines), 200
    except Exception as e:
        return f"An Error Occured: {e}"
@app.route('/list_promotions', methods=['GET'])
def list_promotions():
    """
    Return all cached promotions, or one when an ``id`` query arg is given.
    """
    # block until the snapshot listener has populated the cache
    while len(all_promotions) == 0:
        time.sleep(1)
    try:
        promotion_id = request.args.get('id')
        if promotion_id:
            for element in all_promotions:
                if element['id'] == promotion_id:
                    return jsonify(element), 200
            # the original bound the sentinel `object` class on a miss and
            # jsonify(object) always raised; report the miss explicitly
            return jsonify(None), 404
        else:
            return jsonify(all_promotions), 200
    except Exception as e:
        return f"An Error Occured: {e}"
@app.route('/list_two_flavored_pizzas', methods=['GET'])
def list_two_flavored_pizzas():
    """
    Return all cached two-flavored pizzas, or one when ``id`` is given.
    """
    # block until the snapshot listener has populated the cache
    while len(all_two_flavored_pizzas) == 0:
        time.sleep(1)
    try:
        two_flavored_pizza_id = request.args.get('id')
        if two_flavored_pizza_id:
            for element in all_two_flavored_pizzas:
                if element['id'] == two_flavored_pizza_id:
                    return jsonify(element), 200
            # the original bound the sentinel `object` class on a miss and
            # jsonify(object) always raised; report the miss explicitly
            return jsonify(None), 404
        else:
            return jsonify(all_two_flavored_pizzas), 200
    except Exception as e:
        return f"An Error Occured: {e}"
@app.route('/user', methods=['GET'])
def retrieve_user():
    """Return one user document plus the ids of the user's orders."""
    try:
        user_doc = users_ref.document(request.args.get('id'))
        user = user_doc.get().to_dict()
        # inline the orders_id sub-collection as {order_id: {"id": order_id}}
        user['orders_id'] = {
            order.id: {"id": order.id}
            for order in user_doc.collection("orders_id").stream()
        }
        return jsonify(user), 200
    except Exception as e:
        print('error')
        return f"An Error Occured: {e}"
@app.route('/update', methods=['POST', 'PUT'])
def update():
    """
    update() : Update the Firestore document named by the ``id`` field of
    the JSON body, writing the whole body over it.
    e.g. json={'id': '1', 'title': 'Write a blog post today'}
    """
    try:
        payload = request.json
        todo_ref.document(payload['id']).update(payload)
        return jsonify({"success": True}), 200
    except Exception as e:
        return f"An Error Occured: {e}"
@app.route('/delete', methods=['GET', 'DELETE'])
def delete():
    """
    delete() : Remove the document whose id is passed in the URL query.
    """
    try:
        doc_id = request.args.get('id')
        todo_ref.document(doc_id).delete()
        return jsonify({"success": True}), 200
    except Exception as e:
        return f"An Error Occured: {e}"
@app.route("/account", methods=['GET', 'POST'])
def addAccount():
# import requests
#
# # Data
# data = {
# 'data1': 'something',
# 'data2': 'otherthing'
# }
#
# # Custom headers
# headers = {
# 'content-type': 'multipart/form-data'
# }
#
# # Get response from server
# response = requests.post('http://localhost/', data=data, headers=headers)
#
# # If you care about the response
# print(response.json())
# with application.app_context():
# cur = mysql.connection.cursor()
# cur.execute('INSERT INTO users(id, name, email, phone) VALUES (%s, %s, %s, %s)',
# ('27siod037581984', 'Rogério Pires', 'l2othujk7857jkrs2703@gmail.com', '+5518988021682'))
# mysql.connection.commit()
# cur.close()
return jsonify(accounts[1])
# Bind to the port the hosting platform injects via $PORT, default 8080.
port = int(os.environ.get('PORT', 8080))
if __name__ == '__main__':
    # application.run(debug=True)#, host='0.0.0.0',port=5000)
    # threaded=True so snapshot callbacks and requests can run concurrently
    app.run(threaded=True, host='0.0.0.0', port=port)
|
__author__ = 'Li Bai'
"""the available data are loaded from nordhavn3_april.csv, and the explanatory variables are weather forecasts
('temperature', 'humidity', 'DNI (Direct normal irradiance)', 'windspeed') and
the output is heat load. Considering the time-frequency domain analysis, 'Day sin',
'Day cos', 'Week sin', 'Week cos' are added as input features for feature selection
with PACF analysis, it can be seen that all the weather related variables are 3-lagged correlated. therefore,
all the above variables are considered at time t, t-1 and t-2; However, for the heat load for day-ahead forecast,
only the heat load before t-24 can be known ahead of time, thus only heat load at t-24 and t-25 are used as inputs
The feature selection results in this dataset are ['heat-lag-0','heat-lag-24', 'heat-lag-25', 'temperature-lag-0', 'temperature-lag-1',
'temperature-lag-2', 'humidity-lag-2', 'windspeed-lag-2','DNI-lag-2', 'Day cos-lag-2']; such results are used for
artifiical intelligence methods (SK-learn package and Tensorflow packages) and online learning methods"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import datetime
from sklearn.metrics import mean_absolute_error, mean_squared_error
import tensorflow as tf
import time
import matplotlib as mpl
import matplotlib.pyplot as plt
from helpers import add_day_week_features, data_gene, feature_selection, LAG_DICT, SHIFT_HEAT, LAG_DICT1, SHIFT_HEAT1
# ---- load raw Nordhavn data ------------------------------------------------
df1_ww = pd.read_csv(
    'D:\\OneDrive\\OneDrive - Danmarks Tekniske Universitet'
    '\\energydataDTU\\venv\\data_gene\\nornhavn3_april.csv',
    sep=',', index_col=0)
df1_ww.index = pd.to_datetime(df1_ww.index)
# wind speed magnitude from the two wind components
df1_ww['windspeed'] = np.sqrt(df1_ww['windx'].to_numpy() ** 2
                              + df1_ww['windy'].to_numpy() ** 2)
# Keep only the model-relevant columns under canonical names.
# (The former `df_ww_copy = df1_ww.copy()` was dead code — it was immediately
# overwritten by the empty frame below — so it has been removed.)
df_ww_copy = pd.DataFrame(columns=['heat', 'temperature', 'humidity',
                                   'DNI', 'windspeed'], index=df1_ww.index)
df_ww_copy['heat'] = df1_ww['Counter [MWh]']
df_ww_copy['temperature'] = df1_ww['temperature']
df_ww_copy['DNI'] = df1_ww['solarflux']
df_ww_copy['windspeed'] = df1_ww['windspeed']
df_ww_copy['humidity'] = df1_ww['humidity']
# optional diagnostics: PACF/ACF and FFT spectrum of the raw series
# plot_acf_or_pacf(df_ww_copy)
# fft_analy(df_ww_copy)
# Add 'Day sin'/'Day cos'/'Week sin'/'Week cos' features, then build the
# lagged design matrix per LAG_DICT / SHIFT_HEAT (heat only at lag 24+).
df = add_day_week_features(df_ww_copy)
df1_new = data_gene(LAG_DICT, SHIFT_HEAT, df)
# Trim so the frame starts at midnight and ends at 23:00 (whole days only).
index_start = 24 - df1_new.index[0].hour
index_end = 1 + df1_new.index[-1].hour
df1_new = df1_new.iloc[index_start:-index_end, :]
df1_new_copy = df1_new.copy()
# Data range: '2018-01-21 00:00:00' ~ '2020-07-05 23:00:00'.
# Keep only the three heating seasons.
start0 = datetime.datetime(2018, 1, 22, 0, 0, 0)
end0 = datetime.datetime(2018, 5, 31, 23, 0, 0)
start1 = datetime.datetime(2018, 9, 24, 0, 0, 0)
end1 = datetime.datetime(2019, 5, 31, 23, 0, 0)
start2 = datetime.datetime(2019, 9, 24, 0, 0, 0)
end2 = datetime.datetime(2020, 5, 31, 23, 0, 0)
dates = (pd.date_range(start=start0, end=end0, freq='H').tolist()
         + pd.date_range(start=start1, end=end1, freq='H').tolist()
         + pd.date_range(start=start2, end=end2, freq='H').tolist())
df1_new = df1_new.loc[dates, :]
# 3:1 train/test split, rounded down to whole days.
N_total = len(df1_new)
N_train = int(int(N_total * 0.75 / 24) * 24)
train_df = df1_new[0:N_train]
# Standardise with training statistics only (avoids test-set leakage).
train_df_copy = train_df.copy()
train_df_mean = train_df_copy.mean()
train_df_std = train_df_copy.std()
train_df_copy = (train_df_copy - train_df_mean) / train_df_std
y_train = train_df_copy['heat-lag-0']
train_df_copy.pop('heat-lag-0')
X_train = train_df_copy.copy()
feature_set_lasso, feature_set_xtree, feature_set_info = feature_selection(
    df1_new, X_train, y_train, alpha=0.05, n_estimators=20)
# ========================feature selection results========================
columns = ['heat-lag-0', 'heat-lag-24', 'heat-lag-25', 'temperature-lag-0', 'temperature-lag-1',
           'temperature-lag-2', 'humidity-lag-2', 'windspeed-lag-2', 'DNI-lag-2', 'Day cos-lag-2']
# ===========the selected columns are saved to "nordhavn_terminal3_selected.csv"================
# df1_out = df1_new[columns]
# df1_out.to_csv("nordhavn_terminal3_selected.csv")
|
import sys
from collections import OrderedDict
import pandas as pd
import numpy as np
import operator as op
import tensorflow as tf
from .common import constructNetwork
from .common import constructNetworkWithoutDropout
from .common import convertDateColsToInt
from .common import arrayToText
from .common import constructCleverHansModel
from .common import loadConfig
def main(test_path, model_path):
    """Evaluate a saved TF1 neural-net classifier on a CSV of test drivers.

    test_path  -- path to the test CSV (must contain a 'Default' label column)
    model_path -- path prefix of a trained model; '<prefix>.param' holds
                  normalisation stats and the one-hot expansion map,
                  '<prefix>.ckpt' holds the TF checkpoint.
    Prints the confusion matrix and accuracy; returns nothing.
    """
    loadConfig('./config')
    test_data = pd.read_csv(test_path)
    print(("test_drivers data size %d\n" % test_data.shape[0]))
    print("Raw data loaded successfully.....\n")
    # Interpret params: parse the .param file line by line. Sections are
    # 'X_mean:' / 'X_std:' (value on the NEXT line, comma-separated) and
    # 'colname: v1,,,v2,,,...' entries describing one-hot expansion columns.
    param_path = model_path + '.param'
    param_file = open(param_path)
    lines = param_file.readlines()
    X_mean_train = []
    X_std_train = []
    expandMap = OrderedDict()
    i = 0
    while i < len(lines):
        line = lines[i].strip()
        if line == '':
            i += 1
            continue
        if line == 'X_mean:':
            i += 1
            line = lines[i].strip()
            X_mean_train = [float(x.strip()) for x in line.split(',') if x.strip()]
            i += 1
            continue
        if line == 'X_std:':
            i += 1
            line = lines[i].strip()
            X_std_train = [float(x.strip()) for x in line.split(',') if x.strip()]
            i += 1
            continue
        tokens = line.split(':')
        k = tokens[0].strip()
        if len(tokens) == 1:
            # column name with no values — no dummy columns for it
            expandMap[k] = []
            i += 1
            continue
        v = tokens[1].strip()
        if v == '':
            expandMap[k] = []
        else:
            # ',,,' is the separator used when the params were written
            expandMap[k] = [x.strip() for x in v.split(',,,') if x.strip()]
        i += 1
    param_file.close()
    Y_LABEL = 'Default'
    KEYS = [i for i in list(test_data.keys()) if i != Y_LABEL]
    TEST_SIZE = test_data.shape[0]
    N_POSITIVE = test_data[Y_LABEL].sum()
    N_INPUT = test_data.shape[1] - 1
    N_CLASSES = 2
    print("Variables loaded successfully...\n")
    print(("Number of predictors \t%s" % (N_INPUT)))
    print(("Number of classes \t%s" % (N_CLASSES)))
    print(("TESTING_SIZE \t%s" % (TEST_SIZE)))
    print(("Number of positive instances \t%s" % (N_POSITIVE)))
    print("\n")
    print("Metrics displayed:\tPrecision\n")
    # Convert date-typed columns to integers so they feed the net as numbers.
    date_cols = ['OrDate', 'FirstPayment']
    test_data = convertDateColsToInt(test_data, date_cols)
    print("Start expanding the test data: ")
    # Pull out columns containing NaNs, then one-hot-expand the categorical
    # columns against the SAME dummy columns the model was trained with.
    nan_cols = test_data[test_data.columns[test_data.isnull().any()]]
    test_data.drop(nan_cols.columns, axis=1, inplace=True)
    cat = test_data[list(expandMap.keys())]
    print(("Expand cat data " + str(cat.columns.values) + "\n"))
    num = test_data.drop(cat.columns, axis=1)
    data = pd.DataFrame()
    for i in cat.columns:
        if len(expandMap[i]) == 0:
            continue
        # zero-filled frame with exactly the training-time dummy columns;
        # unseen categories stay all-zero, training-only categories stay zero
        tmp = pd.DataFrame(0, index=np.arange(test_data.shape[0]), columns=expandMap[i])
        tmp1 = pd.get_dummies(cat[i], prefix=str(i), drop_first=True)
        for col in tmp1.columns:
            if col in tmp.columns:
                tmp[col] = tmp1[col]
        data = pd.concat([data, tmp], axis=1)
    test_data = pd.concat([num, data, nan_cols], axis=1).reset_index(drop=True)
    print("Expand categorical features.\n")
    print("After expanding: \n")
    ori_KEYS = KEYS
    N_INPUT = test_data.shape[1] - 1
    KEYS = [i for i in list(test_data.keys()) if i != Y_LABEL]
    print(("Number of predictors \t%s" % (N_INPUT)))
    print(KEYS)
    # NOTE(review): DataFrame.get_values() is deprecated (removed in pandas 1.0);
    # .to_numpy() is the modern equivalent — confirm the pinned pandas version.
    X_test = test_data[KEYS].get_values()
    y_test = test_data[Y_LABEL].get_values()
    # standardise with the training mean/std loaded from the .param file
    X_test = (X_test - X_mean_train) / X_std_train
    #------------------------------------------------------------------------------
    # Neural net construction (TF1 graph mode)
    # Tf placeholders
    X = tf.placeholder(tf.float32, [None, N_INPUT])
    y = tf.placeholder(tf.int64, [None])
    dropout_keep_prob = tf.placeholder(tf.float32)
    pred, layerList = constructNetwork(X, dropout_keep_prob, N_INPUT, N_CLASSES)
    # Loss and optimizer
    logits = pred
    cost = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=y))  # softmax loss
    correct_prediction = tf.equal(tf.argmax(pred, 1), y)
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    confusion = tf.confusion_matrix(y, tf.argmax(pred, 1), 2)
    print("Net built successfully...\n")
    print("Starting training...\n")
    #------------------------------------------------------------------------------
    # Restore the trained weights and evaluate
    # Launch session
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver()
    saver.restore(sess, model_path + '.ckpt')
    print("Testing...\n")
    #------------------------------------------------------------------------------
    # Testing
    #test_acc = sess.run(accuracy, feed_dict={X: X_test, y: y_test, dropout_keep_prob:1.})
    #
    #test_conf = sess.run(confusion, feed_dict={X: X_test, y: y_test, dropout_keep_prob:1.})
    # Accumulate the confusion matrix over ~100 mini-batches to bound memory use.
    test_conf = np.zeros((2, 2))
    indices = np.arange(0, X_test.shape[0])
    for batch_indices in np.array_split(indices, 100):
        batch_xs = X_test[batch_indices, :]
        batch_ys = y_test[batch_indices]
        test_conf += sess.run(confusion, feed_dict={X: batch_xs, y: batch_ys, dropout_keep_prob: 1.})
    # accuracy = (TN + TP) / total, read off the accumulated confusion matrix
    accuracy = (test_conf[0][0] + test_conf[1][1]) / float(np.sum(test_conf))
    print(("Testing accuracy: %.3f" % accuracy))
    print(test_conf)
    sess.close()
    print("Session closed!")
if __name__ == '__main__':
    # Usage: python <script> <test_csv_path> <model_path_prefix>
    # Fail fast with a clear message instead of an IndexError on sys.argv.
    if len(sys.argv) != 3:
        sys.stderr.write("Usage: %s TEST_CSV MODEL_PATH_PREFIX\n" % sys.argv[0])
        sys.exit(1)
    test_path = sys.argv[1]
    model_path = sys.argv[2]
    main(test_path, model_path)
|
import numpy as np
import cv2
import matplotlib.pyplot as plt
class Utils():
    """OpenCV helpers to binarise, deskew, crop and size-normalise a
    signature image to a fixed 150x550 canvas."""
    def __init__(self):
        pass
    def get_otsu_threshold(self, image):
        """Return an inverted Otsu-binarised version of a BGR image
        (ink becomes white on a black background)."""
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        gray = cv2.bitwise_not(gray)
        thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
        return thresh
    def align_image(self, thresh):
        """Deskew a binary image by fitting a robust line (DIST_WELSCH)
        through its foreground pixels and rotating by the line's slope."""
        #thresh = get_otsu_threshold(image)
        shape = thresh.shape
        # 500-px black margins on both sides so rotation does not clip content
        zeros = np.zeros((thresh.shape[0], 500))
        thresh = np.hstack([zeros, thresh, zeros])
        shape = thresh.shape
        zeros = np.zeros((500, thresh.shape[1]))
        thresh = np.vstack([zeros, thresh, zeros])
        #show(thresh)
        # (x, y) coordinates of all foreground pixels (transpose -> x first)
        coords = np.column_stack(np.where(thresh.T > 0))
        #print(coords.shape)
        rows, cols = thresh.shape[:2]
        [vx, vy, x, y] = cv2.fitLine(coords, cv2.DIST_WELSCH, 0, 0.01, 0.1)
        lefty = int((-x * vy / vx) + y)
        righty = int(((cols - x) * vy / vx) + y)
        #cv2.line(thresh,(cols-1,righty),(0,lefty),(255,255,255),10)
        # NOTE(review): (vy/vx)*180/3.14 is a small-angle approximation of
        # atan2(vy, vx) in degrees (and 3.14 != pi) — adequate only for
        # slight skews; confirm inputs are near-horizontal.
        angle = (vy / vx) * 180 / 3.14
        (h, w) = thresh.shape
        center = (w // 2, h // 2)
        M = cv2.getRotationMatrix2D(center, angle, 1.0)
        rotated = cv2.warpAffine(thresh, M, (w, h), flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_REPLICATE)
        #rotated = imutils.rotate(thresh, -angle)
        return rotated
    def crop_signature_fast(self, image):
        """Crop to the first/last rows and columns whose pixel sum exceeds a
        threshold, i.e. to the bounding box of dense foreground.
        NOTE(review): the column scan sums h values but thresholds against
        image.shape[1]*0.85 (= 0.85*w), and the row scan the reverse — the
        axes look swapped; confirm this is intended."""
        h, w = image.shape
        xmin = 0
        xmax = w - 1
        ymin = 0
        ymax = h - 1
        for i in range(w):
            if np.sum(image[:, i]) > image.shape[1] * 0.85:
                #print(np.sum(image[:,i]))
                xmin = i
                break
        for i in range(w - 1, 0, -1):
            if np.sum(image[:, i]) > image.shape[1] * 0.85:
                #print(np.sum(image[:,i]))
                xmax = i
                break
        for i in range(h - 1, 0, -1):
            if np.sum(image[i]) > image.shape[0] * 0.85:
                #print(np.sum(image[i]))
                ymax = i
                break
        for i in range(h):
            if np.sum(image[i]) > image.shape[0] * 0.85:
                #print(np.sum(image[i]))
                ymin = i
                break
        crop_sig = image[ymin:ymax, xmin:xmax]
        return crop_sig
    def pad(self, img):
        """Centre `img` on a black 150x550 canvas with a 5-px border on the
        already-full axis. Assumes resize() produced height 140 or width 540."""
        new_img = np.zeros((150, 550))
        if img.shape[0] == 140:
            # height is full: pad left/right, 5-px top/bottom border
            k1 = int((550 - img.shape[1]) / 2)
            k2 = int((550 - img.shape[1]) / 2 + img.shape[1] % 2)
            new_img[5:-5, k1:-k2] = img
        else:
            # width is full: pad top/bottom, 5-px left/right border
            k1 = int((150 - img.shape[0]) / 2)
            k2 = int((150 - img.shape[0]) / 2 + img.shape[0] % 2)
            new_img[k1:-k2, 5:-5] = img
        return new_img
    def resize(self, img):
        """Scale `img` to fit inside 140x540 preserving aspect ratio, then
        pad it onto the 150x550 canvas."""
        p1 = img.shape[0] / 140
        p2 = img.shape[1] / 540
        if p1 > p2:
            # height is the limiting dimension
            p2 = int(img.shape[1] / p1)
            p1 = 140
        else:
            # width is the limiting dimension
            p1 = int(img.shape[0] / p2)
            p2 = 540
        resized = cv2.resize(img, (p2, p1), interpolation=cv2.INTER_AREA)
        resized = self.pad(resized)
        return resized
    def process(self, img):
        """Full pipeline: binarise -> deskew -> crop -> resize/pad."""
        img = self.get_otsu_threshold(img)
        img = self.align_image(img)
        img = self.crop_signature_fast(img)
        img = self.resize(img)
        return img
    def show_images_sidebyside(self, im1, im2, cmap='gray'):
        """Display two images side by side with matplotlib (debug helper)."""
        fig, ax = plt.subplots(1, 2)
        fig.set_figheight(10)
        fig.set_figwidth(10)
        ax[0].imshow(im1, cmap=cmap);
        ax[1].imshow(im2, cmap=cmap);
        plt.show()
|
from django import template
register = template.Library()
from video.models import Video
@register.inclusion_tag('video/tags/lattest_videos.html', takes_context=True)
def lattest_videos(context):
    """Add up to six public-gallery videos (ordered by `created`) to the
    template context under the key 'lattest_videos'."""
    public_videos = Video.objects.filter(galleries__is_public=True).order_by('created')
    context['lattest_videos'] = public_videos[:6]
    return context
|
import logging
from datetime import datetime
import requests
from common import db_session
from configuration import API_KEY
from .models import TrainActivity
BASE_V2_URL = 'http://realtime.mbta.com/developer/api/v2'
logger = logging.getLogger(__name__)
def format_mbta_request_url(api_key: str):
    """Build the MBTA v2 predictions-by-routes URL for the Green-B line."""
    return f'{BASE_V2_URL}/predictionsbyroutes?api_key={api_key}&routes=Green-b&format=json'
def get_and_insert_current_predictions_by_routes() -> (int, int):
    """Queries the MBTA and upserts an observation row for each datapoint

    Returns: (number of new rows inserted, number of rows upserted)
    """
    list_of_train_activities = get_current_predictions_by_routes()
    with db_session(autoflush=False, echo=True) as session:
        with session.no_autoflush:
            # merge() upserts: updates an existing row or stages a new one
            for activity in list_of_train_activities:
                session.merge(activity)
            # session.new holds only the freshly-inserted objects, so the
            # remainder of the fetched activities were updates
            new_records_count = len(session.new)
            updated_records_count = len(list_of_train_activities) - new_records_count
    return new_records_count, updated_records_count
def get_current_predictions_by_routes(api_key=API_KEY) -> [TrainActivity]:
    """Queries the MBTA api and returns a list with an activity data point for each vehicle"""
    # r = requests.get('http://realtime.mbta.com/developer/api/v2/predictionsbyroute?api_key=wX9NwuHnZU2ToO7GmGR9uw&route=Green-B&direction=1&format=json')
    request_url = format_mbta_request_url(api_key)
    r = requests.get(request_url)
    # first (only) mode/route entry of the payload; note the local name `json`
    # is a dict here, not the stdlib module (which is not imported in this file)
    json = r.json()['mode'][0]['route'][0]
    # top level data for each datapoint
    route_id = json.get('route_id')
    route_name = json.get('route_name')
    # index 1 of the 'direction' list — presumably the eastbound direction of
    # Green-B; TODO confirm against the MBTA v2 payload ordering
    eastbound = json.get('direction')[1]
    direction_id = eastbound.get('direction_id')
    direction_name = eastbound.get('direction_name')
    trips = eastbound.get('trip')
    list_of_train_activities: [TrainActivity] = []
    for trip in trips:
        # flatten the nested 'vehicle' dict and enrich it with route/trip data
        trip_data = trip.get('vehicle')
        trip_data['route_id'] = route_id
        trip_data['route_name'] = route_name
        trip_data['direction_id'] = direction_id
        trip_data['direction_name'] = direction_name
        trip_data.update({'trip_id': trip.get('trip_id')})
        trip_data.update({'trip_name': trip.get('trip_name')})
        trip_data.update({'trip_headsign': trip.get('trip_headsign')})
        # vehicle_timestamp is an epoch-seconds string; store a datetime too
        trip_data.update({'timestamp': datetime.fromtimestamp(int(trip_data.get('vehicle_timestamp')))})
        list_of_train_activities.append(TrainActivity(**trip_data))
    return list_of_train_activities
def get_observations_since(high_water_timestamp=0) -> ([TrainActivity], int):
    """Return all TrainActivity rows newer than `high_water_timestamp` plus the
    new high-water mark, or (None, None) when there are no newer rows."""
    with db_session() as session:
        observations = session.query(TrainActivity).filter(TrainActivity.vehicle_timestamp > high_water_timestamp)
        all_obs = observations.all()
        if not all_obs:
            return None, None
        # newest timestamp seen becomes the caller's next high-water mark
        new_high_water_mark = max([obs.vehicle_timestamp for obs in observations])
        # detach the objects so they remain usable after the session closes
        session.expunge_all()
    return all_obs, new_high_water_mark
|
# coding: utf-8
"""
The sum of the squares of the first ten natural numbers is,
1^(2) + 2^(2) + ... + 10^(2) = 385
The square of the sum of the first ten natural numbers is,
(1 + 2 + ... + 10)^(2) = 55^(2) = 3025
Hence the difference between the sum of the squares of the first ten natural
numbers and the square of the sum is 3025 − 385 = 2640.
Find the difference between the sum of the squares of the first one hundred
natural numbers and the square of the sum.
From http://projecteuler.net/index.php?section=problems&id=6
"""
def problem006(min, max):
    """Difference between the square of the sum and the sum of the squares
    of the integers min..max inclusive (Project Euler #6)."""
    total = 0
    square_total = 0
    for n in range(min, max + 1):
        total += n
        square_total += n * n
    return total * total - square_total
if __name__ == '__main__':
    # sanity check from the problem statement, then print the answer for 1..100
    assert problem006(1, 10) == 2640
    # parenthesised so this runs on both Python 2 and Python 3
    # (the original bare `print x` statement is Python-2-only syntax)
    print(problem006(1, 100))
|
# -*- coding:utf-8 -*-
import os
def getPlus(a, b):
    """Multiply two arbitrarily large numbers digit-by-digit (schoolbook
    long multiplication) and return the product as a string.

    Python 2 only (uses `print` statements and integer `/` floor division).
    NOTE(review): despite the name "Plus", this computes a PRODUCT — the
    digit products are summed per column, which is multiplication.
    Accepts ints or numeric strings; both are normalised via str().
    """
    k1 = len(str(a))
    s1 = str(a)
    k2 = len(str(b))
    s2 = str(b)
    print k1, type(s1), s1, " |--| ", k2, type(s2), s2
    # p[i] accumulates the (un-carried) digit sum for the 10**i column
    p = list()
    k = 0
    for item_b in s2[::-1]:
        # each digit of b starts writing k columns further left
        index = k
        for item_a in s1[::-1]:
            num = int(item_a) * int(item_b)
            if len(p) == index:
                # first contribution to this column
                p.append(num)
                index += 1
                continue
            p[index] += num
            index += 1
        k += 1
    print len(p), p
    # propagate carries column by column and stringify each digit
    for x in range(len(p)):
        if x == len(p) - 1:
            # most significant column keeps its full value
            p[x] = str(p[x])
            continue
        if p[x] / 10 == 0:
            # single digit, no carry (Python 2 floor division)
            p[x] = str(p[x])
            print x, type(p[x]), p[x]
            continue
        elif p[x] / 10 != 0:
            # push the carry into the next column, keep the remainder digit
            m = p[x] / 10
            p[x + 1] += m
            p[x] = str(p[x] % 10)
    # columns were least-significant-first; reverse for display order
    res = "".join(p[::-1])
    print len(res), res
    return res
if __name__ == "__main__":
t = list([1, 2, 3])
print max(t), min(t)
print t
print type(20 % 10), 20 % 10
res = getPlus(str(999999999999999999999999), str(9996646168496898169999999))
print "res:", type(res), res
|
from selenium import webdriver
from selenium.common.exceptions import ElementClickInterceptedException, NoSuchElementException
from selenium.webdriver.common.keys import Keys
import time
from password import *
driver = webdriver.Chrome("C:\Chromedriver\chromedriver")
URL = "https://tinder.com/"
driver.get(URL)
driver.find_element_by_xpath("/html/body/div[1]/div/div[1]/div/main/div[1]/div/div/div/div/header/div/div[2]/div[2]/a").click()
main_page = driver.current_window_handle
print(main_page)
print(driver.title)
time.sleep(1)
driver.find_element_by_xpath("//button[@aria-label='Zaloguj się przez Facebooka']").click()
time.sleep(5)
# changing the handles to access login page
for handle in driver.window_handles:
if handle != main_page:
login_page = handle
# change the control to signin page
driver.switch_to.window(login_page)
driver.find_element_by_xpath("//button[@title='Akceptuj wszystkie']").click()
time.sleep(1)
driver.find_element_by_xpath("//input[@type='text']").send_keys(FACEBOOK_EMAIL)
driver.find_element_by_xpath("//input[@type='password']").send_keys(FACEBOOK_PASSWORD, Keys.ENTER)
driver.switch_to.window(main_page)
time.sleep(6)
driver.find_element_by_xpath("//button[@data-testid='allow']").click()
time.sleep(1)
driver.find_element_by_xpath("//button[@data-testid='allow']").click()
page = driver.find_element_by_tag_name('body')
for _ in range(100):
page.send_keys(Keys.ARROW_RIGHT)
time.sleep(3)
try:
driver.find_element_by_xpath("//button[@title='Wróć do Tindera']").click()
except NoSuchElementException:
try:
driver.find_element_by_xpath("/html/body/div[2]/div/div/div[2]/button[2]").click()
except NoSuchElementException:
continue
|
"""Helper set of functions to read in and parse multiple types of input sources."""
import sys
import os
import datetime
from bs4 import BeautifulSoup
from shapely.geometry.polygon import Polygon
def read(ftype, inDir, inSuffix, startTime, endTime):
    """
    Determines the user-specified file type and parses it accordingly

    Parameters
    ----------
    ftype : string
        The file type to process: segmotion, probsevere, or ryan
    inDir : string
        The input directory
    inSuffix : string
        The lowest subdirectory of the input directory
    startTime : string
        The earliest time to process
    endTime : string
        The latest time to process

    Returns
    -------
    list
        [stormCells, totNumCells, numTrackTimes, dates] - List containing dicts of all
        storm cells, the number of cells, the number of files, and all valid dates

    Raises
    ------
    ValueError
        If ftype is not one of 'ryan', 'segmotion', or 'probsevere'
    """
    if ftype == 'ryan':
        return readRyan(inDir, inSuffix, startTime, endTime)
    elif ftype == 'segmotion':
        return readSegmotion(inDir, inSuffix, startTime, endTime)
    elif ftype == 'probsevere':
        return readProbSevere(inDir, inSuffix, startTime, endTime)
    # Previously an unrecognised ftype silently returned None, which surfaced
    # later as a confusing TypeError at the call site; fail loudly instead.
    raise ValueError("Unknown file type %r; expected 'ryan', 'segmotion', or 'probsevere'" % ftype)
def readRyan(inDir, inSuffix, startTime, endTime):
    """
    Parses post-processed segmotion files (.data) from Ryan's original code.

    Walks `inDir` (optionally restricted to leaf directories named `inSuffix`)
    for files named 'YYYY-MM-DD-hhmmss_*.data' with startTime <= time < endTime.

    Returns
    -------
    list
        [stormCells, totNumCells, numTrackTimes, dates] - dict of all storm
        cells keyed by a running integer id, the cell count, the number of
        files processed, and all valid dates
    """
    numTrackTimes = 0
    totNumCells = 0
    stormCells = {}
    dates = []
    # Read in Ryan files
    for root, dirs, files in os.walk(inDir):
        # when a suffix is given, only process leaf directories with that name
        if inSuffix != '' and not (files and not dirs and os.path.split(root)[-1] == inSuffix): continue
        for trackFile in files:
            if trackFile.endswith('.data'):
                # Skip hidden files
                if trackFile.startswith('._'): continue
                # Check if file falls in date range
                try:
                    fileDate = datetime.datetime.strptime(str(trackFile).split('_')[0], '%Y-%m-%d-%H%M%S')
                except ValueError:
                    print('File ' + str(trackFile) + ' has an invalid name. Expected format YYYY-MM-DD-hhmmss_...')
                    continue
                if not startTime <= fileDate < endTime:
                    continue
                if fileDate.date() not in dates: dates.append(fileDate.date())
                # Open file
                f = open(root + '/' + trackFile)
                lines = f.readlines()
                f.close()
                # Skip probSevere files — line 29's leading flag == 1 marks
                # probSevere output; assumes the fixed .data layout, TODO confirm
                if int(lines[28].split()[0]) == 1:
                    print('\nWARNING: Unable to process storm objects from probSevere in Ryan format. Use "-t probsevere" instead.')
                    print(str(trackFile) + ' will be skipped.\n')
                    continue
                print(trackFile)
                numTrackTimes += 1
                # Get Individual cell metadata: one cell record every 5 lines
                # starting at line 33 (fixed file layout)
                cells = lines[32::5]
                numCells = len(cells)
                for cell in cells:
                    cell = cell.split()
                    cellID = totNumCells
                    # track ids are suffixed with the date so ids reused on
                    # different days stay distinct
                    stormCells[cellID] = {'time':fileDate, 'lat':float(cell[0]), 'lon':float(cell[1]), 'latr':float(cell[3]),
                                          'lonr':float(cell[4]), 'orientation':float(cell[8]), 'track':str(cell[9]) + '_' + str(fileDate.date()), 'old_track': str(cell[9])}
                    totNumCells += 1
    return [stormCells, totNumCells, numTrackTimes, dates]
def readSegmotion(inDir, inSuffix, startTime, endTime):
    """
    Parses raw segmotion .xml files.

    Walks `inDir` (optionally restricted to leaf directories named `inSuffix`)
    for files named 'YYYYMMDD-hhmmss.xml' with startTime <= time < endTime.

    Returns
    -------
    list
        [stormCells, totNumCells, numTrackTimes, dates] - dict of all storm
        cells keyed by a running integer id, the cell count, the number of
        files processed, and all valid dates
    """
    numTrackTimes = 0
    totNumCells = 0
    stormCells = {}
    dates = []
    # Read in Segmotion files
    for root, dirs, files in os.walk(inDir):
        # when a suffix is given, only process leaf directories with that name
        if inSuffix != '' and not (files and not dirs and os.path.split(root)[-1] == inSuffix): continue
        for trackFile in files:
            if trackFile.endswith('.xml'):
                # Skip hidden files
                if trackFile.startswith('._'): continue
                # Check if file falls in date range
                try:
                    fileDate = datetime.datetime.strptime(str(trackFile).split('.')[0], '%Y%m%d-%H%M%S')
                except ValueError:
                    print('File ' + str(trackFile) + ' has an invalid name. Expected format YYYYMMDD-hhmmss.xml...')
                    continue
                if not startTime <= fileDate < endTime:
                    continue
                if fileDate.date() not in dates: dates.append(fileDate.date())
                # Open file
                f = open(root + '/' + trackFile)
                lines = BeautifulSoup(f, 'html.parser').find_all('datacolumn')
                f.close()
                print(trackFile)
                numTrackTimes += 1
                numCells = len(lines[2].find_all('item'))
                for i in range(0, numCells):
                    time = fileDate
                    # values live in attribute position 1 of each <item> tag;
                    # the datacolumn indices (4,5,6,7,12,13) assume the fixed
                    # segmotion column order — TODO confirm against the schema
                    latr = float(str(lines[4].find_all('item')[i]).split('"')[1])
                    lat = float(str(lines[5].find_all('item')[i]).split('"')[1])
                    lonr = float(str(lines[6].find_all('item')[i]).split('"')[1])
                    lon = float(str(lines[7].find_all('item')[i]).split('"')[1])
                    orientation = float(str(lines[12].find_all('item')[i]).split('"')[1])
                    track = str(lines[13].find_all('item')[i]).split('"')[1]
                    cellID = totNumCells
                    # track ids are date-suffixed so per-day id reuse stays unique
                    stormCells[cellID] = {'time': time, 'latr': latr, 'lat': lat, 'lonr': lonr, 'lon': lon,
                                          'orientation': orientation, 'track': track + '_' + str(fileDate.date()), 'old_track': track}
                    totNumCells += 1
    return [stormCells, totNumCells, numTrackTimes, dates]
def readProbSevere(inDir, inSuffix, startTime, endTime):
    """
    Parses probSevere .ascii files.

    Walks `inDir` (optionally restricted to leaf directories named `inSuffix`)
    for files named 'SSEC_AWIPS_PROBSEVERE_YYYYMMDD_hhmmss.ascii' with
    startTime <= time < endTime. Cell centroids are computed from the
    polygon vertices via shapely.

    Returns
    -------
    list
        [stormCells, totNumCells, numTrackTimes, dates] - dict of all storm
        cells keyed by a running integer id, the cell count, the number of
        files processed, and all valid dates
    """
    numTrackTimes = 0
    totNumCells = 0
    stormCells = {}
    dates = []
    # Read in ProbSevere files
    for root, dirs, files in os.walk(inDir):
        # when a suffix is given, only process leaf directories with that name
        if inSuffix != '' and not (files and not dirs and os.path.split(root)[-1] == inSuffix): continue
        for trackFile in files:
            if trackFile.endswith('.ascii'):
                # Skip hidden files
                if trackFile.startswith('._'): continue
                # Check if file falls in date range
                try:
                    date = str(trackFile).split('.')[0].split('_')[3]
                    time = str(trackFile).split('.')[0].split('_')[4]
                    fileDate = datetime.datetime.strptime(date + '_' + time, '%Y%m%d_%H%M%S')
                except ValueError:
                    print('File ' + str(trackFile) + ' has an invalid name. Expected format SSEC_AWIPS_PROBSEVERE_YYYYMMDD_hhmmss.ascii...')
                    continue
                if not startTime <= fileDate < endTime:
                    continue
                if fileDate.date() not in dates: dates.append(fileDate.date())
                # Open file
                f = open(root + '/' + trackFile)
                lines = f.readlines()
                f.close()
                print(trackFile)
                numTrackTimes += 1
                for line in lines:
                    # header line carries the valid time; skip it
                    if line.startswith('Valid:'): continue
                    # colon-separated record; field 7 holds alternating
                    # lat,lon vertex values, field 8 the track id
                    data = str(line).split(':')
                    lats = list(map(float, data[7].split(',')[0::2]))
                    lons = list(map(float, data[7].split(',')[1::2]))
                    track = data[8]
                    # half the bounding-box extent serves as the cell "radius"
                    latr = (max(lats) - min(lats)) / 2.
                    lonr = abs(max(lons) - min(lons)) / 2.
                    # Calculate centroid of the cell polygon
                    points = []
                    for i in range(0, len(lats)):
                        points.append((lons[i], lats[i]))
                    poly = Polygon(points)
                    lon = poly.centroid.x
                    lat = poly.centroid.y
                    cellID = totNumCells
                    # probSevere has no orientation; track ids are date-suffixed
                    stormCells[cellID] = {'time': fileDate, 'latr': latr, 'lat': lat, 'lonr': lonr, 'lon': lon,
                                          'orientation': 'NaN', 'track': track + '_' + str(fileDate.date()), 'old_track': track}
                    totNumCells += 1
    return [stormCells, totNumCells, numTrackTimes, dates]
|
from django.conf.urls import url
from django.urls import include, path
from fichaArticulo import views as ficha_views
from . import views
# URL routes for the article-detail (fichaArticulo) app.
urlpatterns = [
    # root route, namespaced 'ficha' via an inline include
    path('', include(([path('', ficha_views.fichaArticulo, name='fichaArticulo')],'fichaArticulo'), namespace='ficha')),
    # NOTE(review): this second '' route duplicates the pattern above and can
    # never match (Django dispatches to the first match) — confirm whether it
    # is kept only for the non-namespaced reverse name 'fichaArticulo'.
    path(r'', ficha_views.fichaArticulo, name='fichaArticulo'),
    # article list / reservation / edit endpoints (legacy url() regex routes)
    url(r'^lista/$', views.lista, name='lista'),
    url(r'^reserva/$', views.reserva, name='reserva'),
    url(r'^editar/$', views.editar, name='editar'),
    url(r'^editarFoto/$', views.editarFoto, name='editarFoto'),
    url(r'^editarDes/$', views.editarDes, name='editarDes'),
    url(r'^editarRes/$', views.editarRes, name='editarRes'),
    url(r'^editarEst/$', views.editarEst, name='editarEst'),
    # accept / cancel endpoints for each editable field
    url(r'^aceptarnombre/$', views.aceptarNombre, name='aceptarNombre'),
    url(r'^cancelarnombre/$', views.cancelarNombre, name='cancelarNombre'),
    url(r'^aceptardes/$', views.aceptarDes, name='aceptarDes'),
    url(r'^aceptarest/$', views.aceptarEst, name='aceptarEst'),
    url(r'^aceptarfoto/$', views.aceptarFoto, name='aceptarFoto'),
    url(r'^aceptarres/$', views.aceptarRes, name='aceptarRes')
]
|
#
# MIT License
#
# Copyright (c) 2018 Matteo Poggi m.poggi@unibo.it
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
def get_disp(x):
    """Disparity head: 2-channel 3x3 stride-1 conv with sigmoid, scaled by 0.3."""
    return 0.3 * conv(x, 2, 3, 1, tf.nn.sigmoid)
def conv(x, num_out_layers, kernel_size, stride, activation_fn=tf.nn.elu):
    """'SAME'-style convolution built from explicit symmetric padding
    followed by a VALID slim.conv2d."""
    pad = np.floor((kernel_size - 1) / 2).astype(np.int32)
    padded = tf.pad(x, [[0, 0], [pad, pad], [pad, pad], [0, 0]])
    return slim.conv2d(padded, num_out_layers, kernel_size, stride, 'VALID', activation_fn=activation_fn)
def conv_block(x, num_out_layers, kernel_size):
    """Two stacked convolutions; the second one downsamples by stride 2."""
    first = conv(x, num_out_layers, kernel_size, 1)
    return conv(first, num_out_layers, kernel_size, 2)
def maxpool(x, kernel_size):
    """'SAME'-style max pooling via explicit symmetric padding + slim.max_pool2d."""
    pad = np.floor((kernel_size - 1) / 2).astype(np.int32)
    padded = tf.pad(x, [[0, 0], [pad, pad], [pad, pad], [0, 0]])
    return slim.max_pool2d(padded, kernel_size)
def resconv(x, num_layers, stride):
    """Bottleneck residual unit: 1x1 -> 3x3(stride) -> 1x1(4*num_layers),
    with a projection shortcut when shape or stride changes."""
    # NOTE(review): `tf.shape(x)[3] != num_layers` compares a Tensor with a
    # Python int; depending on the TF version this does not evaluate the
    # channel count at graph-build time, which can make do_proj effectively
    # always truthy — confirm against the TF version in use.
    do_proj = tf.shape(x)[3] != num_layers or stride == 2
    shortcut = []
    conv1 = conv(x, num_layers, 1, 1)
    conv2 = conv(conv1, num_layers, 3, stride)
    # final 1x1 expansion has no activation (activation_fn=None);
    # the non-linearity is applied after the residual addition below
    conv3 = conv(conv2, 4 * num_layers, 1, 1, None)
    if do_proj:
        # 1x1 projection matches the shortcut's channels/stride to conv3
        shortcut = conv(x, 4 * num_layers, 1, stride, None)
    else:
        shortcut = x
    return tf.nn.elu(conv3 + shortcut)
def resblock(x, num_layers, num_blocks):
    """Stack of `num_blocks` residual units; only the final unit strides by 2."""
    out = x
    for _ in range(num_blocks - 1):
        out = resconv(out, num_layers, 1)
    return resconv(out, num_layers, 2)
def upconv(x, num_out_layers, kernel_size, scale):
    """Nearest-neighbour upsample by `scale`, then a stride-1 convolution."""
    upsampled = upsample_nn(x, scale)
    return conv(upsampled, num_out_layers, kernel_size, 1)
def deconv(x, num_out_layers, kernel_size, scale):
    """Transposed convolution with 1-px input padding, cropped back afterwards."""
    p_x = tf.pad(x, [[0, 0], [1, 1], [1, 1], [0, 0]])
    convs = slim.conv2d_transpose(p_x, num_out_layers, kernel_size, scale, 'SAME')
    # Crop [3:-1] in H and W to undo the border introduced by padding the
    # input before the stride-`scale` transpose conv.
    # NOTE(review): the 3/-1 offsets look tuned for scale == 2; confirm the
    # output shape for other scales.
    return convs[:,3:-1,3:-1,:]
def upsample_nn(x, ratio):
    """Nearest-neighbour upsample by an integer ratio using the dynamic H/W."""
    shape = tf.shape(x)
    return tf.image.resize_nearest_neighbor(x, [shape[1] * ratio, shape[2] * ratio])
|
# encoding: UTF-8
import main
from dal import base_dal
from test_main.constants import *
def test_delete_performance_report():
    """Smoke test: deleting the report for the fixture YEAR/QUARTER must not raise."""
    base_dal.delete_performance_report(YEAR, QUARTER)
if __name__ == '__main__':
    # configure logging before touching the DAL, then run the single test
    main.setup_logging()
    test_delete_performance_report()
|
def exercicio4():
    """Read the four bimester grades from the user and print the final average."""
    print("Programa de calculo de média final")
    ordinais = ("primeiro", "segundo", "terceiro", "quarto")
    # one prompt per bimester; the prompt text matches the original strings
    notas = [int(input(f"Digite a média do {ordinal} bimestre: ")) for ordinal in ordinais]
    media_final = sum(notas) / 4
    print(f'A média final é {media_final}')
exercicio4()
# Generated by Django 3.1.7 on 2021-03-07 15:09
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add an optional header image to Post and restrict `measurement`
    to the fixed choices N/A, km and m."""
    dependencies = [
        ('gamma', '0006_merge_20210304_0032'),
    ]
    operations = [
        migrations.AddField(
            model_name='post',
            name='header_image',
            # nullable + blank so existing rows need no image backfill
            field=models.ImageField(blank=True, null=True, upload_to='images/'),
        ),
        migrations.AlterField(
            model_name='post',
            name='measurement',
            field=models.TextField(choices=[('N/A', 'N/A'), ('km', 'km'), ('m', 'm')]),
        ),
    ]
|
import csv
import subprocess
# stdout = subprocess.PIPE, stderr = subprocess.PIPE
# Launch Abaqus CAE headless to run the parametric 2D model script, which is
# expected to write its results to force_output.csv.
# NOTE(review): passing a list together with shell=True only behaves as
# intended on Windows; on POSIX the extra list items are treated as shell
# arguments — confirm this script targets Windows only.
subprocess.run(
    ["abaqus", "cae", "noGUI=./abaqusScript/autoParametric2DnoGUI.py"], shell=True)
print("*********************")
# print every force value produced by the Abaqus run
with open('force_output.csv', 'r') as file:
    reader = csv.reader(file)
    for row in reader:
        for col in row:
            print(float(col))
|
# Generated by Django 2.0 on 2018-01-24 07:28
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: create the Coin model holding per-coin GitHub
    activity metrics and derived scores."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Coin',
            fields=[
                # short ticker-style identifier used as the primary key
                ('coin_id', models.CharField(max_length=10, primary_key=True, serialize=False)),
                ('coin_name', models.CharField(max_length=50)),
                ('github_link', models.TextField()),
                ('official_site_link', models.TextField()),
                ('total_contributors', models.IntegerField()),
                ('active_contributors', models.IntegerField()),
                # derived scores default to 0 until computed
                ('read_me_score', models.IntegerField(default=0)),
                ('issues_score', models.IntegerField(default=0)),
                ('issues_open', models.IntegerField()),
                ('issues_closed', models.IntegerField()),
                ('pr_score', models.IntegerField(default=0)),
                ('pr_open', models.IntegerField()),
                ('pr_closed', models.IntegerField()),
                ('pr_merged', models.IntegerField()),
            ],
        ),
    ]
|
# coding: utf-8
from flask import Flask, render_template
from flask import request
import funpy.app as funpy
app = Flask(__name__)  # create the Flask application instance
@app.route('/weather',methods=['GET', 'POST'])
def add_numbers():
    """Render index.html with the weather for the lat/lng query parameters.

    NOTE(review): the name 'add_numbers' does not describe this view, and
    POST requests are still read via request.args (query string), not form
    data — confirm both are intentional.
    """
    lat = request.args.get('lat')
    lng = request.args.get('lng')
    num = funpy.api(lat,lng)
    return render_template('index.html',message = num)
@app.route("/") #アプリケーション/indexにアクセスが合った場合
def index():
num = funpy.api(35.681167,139.767052)
return render_template('index.html',message = num) #/indexにアクセスが来たらtemplates内のindex.htmlが開きます
#ここがサーバーサイドからクライアントサイドへなにかを渡すときのポイントになります。
if __name__ == "__main__":
# webサーバー立ち上げ
app.run()
|
import os
from influxdb_client import InfluxDBClient, Point
from influxdb_client.client.write_api import SYNCHRONOUS
bucket = os.getenv("INFLUX_BUCKET")
host = os.getenv("INFLUX_HOST")
port = os.getenv("INFLUX_PORT")
org = os.getenv("INFLUX_ORG")
token = os.getenv("INFLUX_TOKEN")
class HiveData(object):
    """Thin wrapper around an InfluxDB client for writing beehive telemetry
    (weight plus inside/outside temperature) tagged by board MAC address."""
    def __init__(self, host: str, port: int, bucket: str, token: str, org: str):
        url = f"http://{host}:{port}"
        self._bucket = bucket
        self._client = InfluxDBClient(url=url, token=token, org=org)
        # SYNCHRONOUS write mode: each write blocks until acknowledged
        self._write_api = self._client.write_api(write_options=SYNCHRONOUS)
        self._query_api = self._client.query_api()
    async def write_point(self, mac: str, weight: float, temp_in: float, temp_out: float) -> None:
        """Write one 'hivedata' measurement point for the given board.

        NOTE(review): declared async but performs a blocking synchronous
        write — this will stall the event loop; confirm whether an async
        write API was intended.
        """
        p = (
            Point("hivedata")
            .tag("board", mac)
            .field("weight", weight)
            .field("temperature_inside", temp_in)
            .field("temperature_outside", temp_out)
        )
        self._write_api.write(bucket=self._bucket, record=p)
|
from django.contrib import admin
from .models import Evento
from .models import Professores
from .models import Alunos, Cursos
# Expose the app's models in the Django admin site.
admin.site.register(Evento)
admin.site.register(Professores)
admin.site.register(Alunos)
admin.site.register(Cursos)
|
"""
Tests Deploy CLI
"""
from subprocess import CalledProcessError, PIPE
from unittest import TestCase
from mock import patch, call
from samcli.lib.samlib.cloudformation_command import execute_command, find_executable
class TestExecuteCommand(TestCase):
    """Unit tests for execute_command."""

    def setUp(self):
        self.args = ("--arg1", "value1", "different args", "more")

    @patch("subprocess.check_call")
    @patch("samcli.lib.samlib.cloudformation_command.find_executable")
    def test_must_add_template_file(self, find_executable_mock, check_call_mock):
        # The located executable must be invoked with the subcommand, the
        # caller's arguments, and the template file appended at the end.
        find_executable_mock.return_value = "mycmd"
        check_call_mock.return_value = True
        execute_command("command", self.args, "/path/to/template")
        expected_argv = (["mycmd", "cloudformation", "command"]
                         + ["--arg1", "value1", "different args", "more",
                            "--template-file", "/path/to/template"])
        check_call_mock.assert_called_with(expected_argv)

    @patch("sys.exit")
    @patch("subprocess.check_call")
    @patch("samcli.lib.samlib.cloudformation_command.find_executable")
    def test_command_must_exit_with_status_code(self, find_executable_mock, check_call_mock, exit_mock):
        # A CalledProcessError from the subprocess must propagate its return
        # code through sys.exit.
        find_executable_mock.return_value = "mycmd"
        check_call_mock.side_effect = CalledProcessError(2, "Error")
        exit_mock.return_value = True
        execute_command("command", self.args, None)
        exit_mock.assert_called_with(2)
class TestFindExecutable(TestCase):
    """Unit tests for find_executable's platform-specific lookup order.

    Fix: assertEquals is a long-deprecated alias of assertEqual and was
    removed in Python 3.12; all call sites now use assertEqual.
    """

    @patch("subprocess.Popen")
    @patch("platform.system")
    def test_must_use_raw_name(self, platform_system_mock, popen_mock):
        """On non-Windows platforms only the raw name is probed."""
        platform_system_mock.return_value = "Linux"
        execname = "foo"
        find_executable(execname)
        self.assertEqual(popen_mock.mock_calls, [
            call([execname], stdout=PIPE, stderr=PIPE)
        ])

    @patch("subprocess.Popen")
    @patch("platform.system")
    def test_must_use_name_with_cmd_extension_on_windows(self, platform_system_mock, popen_mock):
        """On Windows the .cmd variant is tried first."""
        platform_system_mock.return_value = "windows"
        execname = "foo"
        expected = "foo.cmd"
        result = find_executable(execname)
        self.assertEqual(result, expected)
        self.assertEqual(popen_mock.mock_calls, [
            call(["foo.cmd"], stdout=PIPE, stderr=PIPE)
        ])

    @patch("subprocess.Popen")
    @patch("platform.system")
    def test_must_use_name_with_exe_extension_on_windows(self, platform_system_mock, popen_mock):
        """When .cmd fails, the .exe variant is tried next."""
        platform_system_mock.return_value = "windows"
        execname = "foo"
        expected = "foo.exe"
        popen_mock.side_effect = [OSError, "success"]  # fail on .cmd extension
        result = find_executable(execname)
        self.assertEqual(result, expected)
        self.assertEqual(popen_mock.mock_calls, [
            call(["foo.cmd"], stdout=PIPE, stderr=PIPE),
            call(["foo.exe"], stdout=PIPE, stderr=PIPE)
        ])

    @patch("subprocess.Popen")
    @patch("platform.system")
    def test_must_use_name_with_no_extension_on_windows(self, platform_system_mock, popen_mock):
        """When .cmd and .exe both fail, the bare name is the last resort."""
        platform_system_mock.return_value = "windows"
        execname = "foo"
        expected = "foo"
        popen_mock.side_effect = [OSError, OSError, "success"]  # fail on .cmd and .exe extension
        result = find_executable(execname)
        self.assertEqual(result, expected)
        self.assertEqual(popen_mock.mock_calls, [
            call(["foo.cmd"], stdout=PIPE, stderr=PIPE),
            call(["foo.exe"], stdout=PIPE, stderr=PIPE),
            call(["foo"], stdout=PIPE, stderr=PIPE),
        ])

    @patch("subprocess.Popen")
    @patch("platform.system")
    def test_must_raise_error_if_executable_not_found(self, platform_system_mock, popen_mock):
        """If every candidate name fails, an OSError listing them is raised."""
        platform_system_mock.return_value = "windows"
        execname = "foo"
        popen_mock.side_effect = [OSError, OSError, OSError, "success"]  # fail on all executable names
        with self.assertRaises(OSError) as ctx:
            find_executable(execname)
        expected = "Unable to find AWS CLI installation under following names: {}".format(["foo.cmd", "foo.exe", "foo"])
        self.assertEqual(expected, str(ctx.exception))
        self.assertEqual(popen_mock.mock_calls, [
            call(["foo.cmd"], stdout=PIPE, stderr=PIPE),
            call(["foo.exe"], stdout=PIPE, stderr=PIPE),
            call(["foo"], stdout=PIPE, stderr=PIPE),
        ])
|
from Calculator import BasicArithmeticOperation0_1 as BAO
# import BasicArithmeticOperation0_1 as BAO
import numpy as np
import time
import matplotlib.pyplot as plt
def trapezium_area(top, base, height):
    """Return the area of a trapezium with parallel sides *top* and *base*.

    Area = (top + base) * height / 2.  The original wrapped this one-line
    formula in BAO.add/BAO.multi calls, which obscured it and added call
    overhead for no benefit.
    """
    return (top + base) * height / 2
def integration_simp(equ, start, end, n):
    """Approximate the integral of *equ* over [start, end] with Simpson's rule.

    Args:
        equ: expression string in the variable "X", evaluated with eval()
             (trusted input only).
        start, end: integration bounds.
        n: number of sub-intervals; must be even for a valid Simpson estimate.

    Bug fixes relative to the original:
    * the grid is built with np.linspace(start, end, n + 1) so the endpoint is
      included exactly (np.arange(start, end + h, h) could gain or drop a
      point to floating-point rounding);
    * the weighted sum is scaled by h / 3, not 1 / (3 * n) — the original
      result was wrong by a factor of (end - start).
    """
    h = (end - start) / n
    x_values = np.linspace(start, end, n + 1)
    # NOTE: eval() of a caller-supplied expression — only use trusted input.
    y_values = np.array([eval(equ.replace("X", str(x))) for x in x_values])
    # Simpson weights: 1 at the ends, 4 on odd interior points, 2 on even ones.
    weighted = (y_values[0] + y_values[-1]
                + 2 * y_values[2:-2:2].sum()
                + 4 * y_values[1:-1:2].sum())
    return weighted * h / 3
def integration_simp_check(equ, start, end, step):
    """Composite Simpson's rule over the grid np.arange(start, end, step).

    Kept as an independent cross-check for integration_simp.  Note that
    np.arange excludes *end*, so the integral actually covers
    [start, last grid point]; the grid should contain an odd number of points.

    Bug fix: the original slid the three-point Simpson window one point at a
    time, so every interior interval was counted twice; the window must
    advance two points (one Simpson panel) per iteration.
    """
    x_values = np.arange(start, end, step)
    # NOTE: eval() of a caller-supplied expression — only use trusted input.
    y_values = np.array([eval(equ.replace("X", str(x))) for x in x_values])
    area = 0.0
    # One Simpson panel spans indices i, i+1, i+2 — advance by 2 each time.
    for i in range(0, y_values.size - 2, 2):
        area += (y_values[i] + 4 * y_values[i + 1] + y_values[i + 2]) * step / 3
    return area
def integration_trape(equ, start, end, step):
    """Trapezoidal-rule integral of *equ* (a string in "X") over
    np.arange(start, end, step); note arange excludes the endpoint."""
    sample_points = np.arange(start, end, step)
    # Fixed 100-character progress ruler (digits mark 0/25/50/75/100
    # positions), printed exactly as the original did before evaluation.
    scale_digits = {0: "0", 23: "2", 24: "5", 48: "5", 49: "0",
                    73: "7", 74: "5", 97: "1", 98: "0", 99: "0"}
    print("".join(scale_digits.get(pos, "*") for pos in range(100)))
    samples = np.zeros(sample_points.size)
    for idx in range(sample_points.size):
        # Evaluate the expression with "X" replaced by the grid value.
        samples[idx] = eval(equ.replace("X", str(sample_points[idx])))
    total = 0
    for idx in range(samples.size - 1):
        total += trapezium_area(samples[idx], samples[idx + 1], step)
    return total
# a = np.array([0, 1, 2, 3, 4])
# a_squared = a*a
# print(a_squared)
# a_2 = a_squared+2
# print(a_2)
# a_2_4 = a_2*4
# print(a_2_4)
#
# result = a_2[0] + a_2_4[1] + a_2[2] + a_2[1] + a_2_4[2] + a_2[3] + a_2[2] + a_2_4[3] + a_2[4]
# print(result/3)
# start_time = time.time()
# print("check\n" + str(integration_simp_check("X*X+2", 0, 5, 0.5)))
# end_time = time.time()
# print(end_time - start_time)
# Ad-hoc benchmark: time Simpson's rule on x^2 over [1, 4] with 10 panels.
start_time = time.time()
print("simpton\n" + str(integration_simp("X*X", 1, 4, 10)))
end_time = time.time()
print(end_time - start_time)  # elapsed seconds
# Time the trapezoidal rule on x^2 over [0, 5] with a fine 0.001 step.
start_time = time.time()
print(integration_trape("X*X", 0, 5, 0.001))
end_time = time.time()
print(end_time - start_time)  # elapsed seconds
#
# x = np.zeros(10000) # x軸の値
# y1 = np.zeros(x.size) # y軸の値
#
# for i in range(x.size-2):
# y1[i] = integration_simp("X*X+2", 0, 5, 5/(i+1))
# # figureを生成する
# fig = plt.figure()
#
# # axをfigureに設定する
# ax = fig.add_subplot(1, 1, 1)
#
# # axesにplot
# ax.plot(x, y1, "-", linewidth=1)
#
# # 表示する
# plt.show() |
from django.db import models
from django.utils.text import slugify
from django.utils import timezone
from django.contrib.auth.models import User
from django.urls import reverse
class Blog(models.Model):
    """A blog post written by a user within a single category."""

    title = models.CharField(max_length=100, unique=True)
    # Regenerated from the title on every save(), so it tracks renames.
    slug = models.SlugField(max_length=100, unique=True)
    content = models.TextField()
    # Pass the callable itself, not timezone.now(): calling it here would
    # freeze the default to the moment the module was first imported.
    date_posted = models.DateTimeField(default=timezone.now)
    category = models.ForeignKey('blog.Category', on_delete=models.CASCADE)
    author = models.ForeignKey(User, on_delete=models.CASCADE)

    def save(self, *args, **kwargs):
        """Derive the slug from the title before persisting."""
        self.slug = slugify(self.title)
        super(Blog, self).save(*args, **kwargs)

    def __str__(self):
        return self.title

    def get_absolute_url(self):
        """URL of the post detail page, resolved by slug."""
        return reverse('view_post', kwargs={'slug': self.slug})
class Category(models.Model):
    """A grouping for blog posts; the slug is derived from the title on save."""
    title = models.CharField(max_length=100, db_index=True)
    slug = models.SlugField(max_length=100, db_index=True)
    def save(self,*args,**kwargs):
        """Keep the slug in sync with the (possibly renamed) title."""
        self.slug=slugify(self.title)
        super(Category, self).save(*args,**kwargs)
    def __str__(self):
        return self.title
    def get_absolute_url(self):
        # Categories have no detail page; link back to the home view.
        return reverse('home')
import os
import json
def store_string_xml(xml_results, field_values, field_name):
    """Store *field_values* into *xml_results* under zero-padded keys.

    Each value is keyed as field_name plus a two-digit index, e.g.
    FOM00, FOM01, ...

    Args:
        xml_results: dict collecting values destined for the XML output.
        field_values: sequence of values to store.
        field_name: key prefix for this field.
    """
    # enumerate() replaces the range(len(...)) indexing of the original.
    for index, value in enumerate(field_values):
        xml_results[field_name + str(index).zfill(2)] = value
def write_ispyb_xml(filename, full_command_line, write_directory, xml_results):
    """Write items in *xml_results* into an XML file to be stored in ISPyB.

    Renders the ispyb.xml template under $FAST_EP_ROOT with the result fields
    plus per-bin phasing statistics, and writes it to *filename*.  Prints a
    message and returns without writing when a template is missing.
    """
    import datetime

    template_dir = os.path.join(os.environ['FAST_EP_ROOT'], 'lib', 'templates')
    xml_template = os.path.join(template_dir, 'ispyb.xml')
    phs_stat_fom_template = os.path.join(template_dir,
                                         'phasing_statistics_fom.xml')
    phs_stat_mapcc_template = os.path.join(template_dir,
                                           'phasing_statistics_mapcc.xml')

    # Best-effort: report the first missing template and bail out, matching
    # the original's behavior of returning rather than raising.
    for template in (xml_template, phs_stat_fom_template,
                     phs_stat_mapcc_template):
        if not os.path.exists(template):
            print('XML template not found: %s' % template)
            return

    # Per-bin phasing statistics rendered from their own sub-templates.
    (all_phs_stat_fom, all_phs_stat_mapcc) = get_phasing_statistics(
        phs_stat_fom_template, phs_stat_mapcc_template, xml_results)

    time_stamp = '%4d-%02d-%02d %02d:%02d:%02d' % tuple(
        datetime.datetime.now().timetuple()[:6])

    # Context managers close the handles deterministically — the original
    # leaked both via open(...).read() / open(...).write().
    with open(xml_template, 'r') as template_file:
        template_text = template_file.read()
    with open(filename, 'w') as out_file:
        out_file.write(template_text.format(
            commandline=full_command_line,
            results_directory=write_directory,
            spacegroup_id=xml_results['SPACEGROUP'],
            solvent_content=xml_results['SOLVENTCONTENT'],
            enantiomorph=xml_results['ENANTIOMORPH'],
            lowres=xml_results['LOWRES'],
            highres=xml_results['HIGHRES'],
            shelxc_spacegroup=xml_results['SHELXC_SPACEGROUP_ID'],
            substructure_method=xml_results['SUBSTRUCTURE_METHOD'],
            phasing_statistics_fom=all_phs_stat_fom,
            phasing_statistics_mapcc=all_phs_stat_mapcc,
            time_stamp=time_stamp))
def get_phasing_statistics(fom_template, cc_template, xml_results):
    """Render per-resolution-bin FOM and map-CC statistics XML fragments.

    Bins are discovered by probing RESOLUTION_LOW<NN> keys in *xml_results*
    until one is missing (bin 00 is assumed present).  Returns a tuple of
    (fom_fragments, mapcc_fragments) — each the concatenation of one rendered
    template per bin.  A KeyError propagates when a bin's data is incomplete.

    Fixes: templates are read once (the original re-opened and leaked a file
    handle per bin), and the dead `== None` checks — impossible after the
    float()/int() conversions — are removed.
    """
    # Count bins by probing for RESOLUTION_LOW01, 02, ... until absent.
    total_bins = 1
    while ('RESOLUTION_LOW' + str(total_bins).zfill(2)) in xml_results:
        total_bins += 1

    with open(fom_template, 'r') as handle:
        fom_text = handle.read()
    with open(cc_template, 'r') as handle:
        cc_text = handle.read()

    all_phs_stat_fom = ""
    all_phs_stat_mapcc = ""
    for bin_number in range(total_bins):
        bin_number_name = str(bin_number).zfill(2)
        resolution_low = float(xml_results['RESOLUTION_LOW' + bin_number_name])
        resolution_high = float(xml_results['RESOLUTION_HIGH' + bin_number_name])
        fom = float(xml_results['FOM' + bin_number_name])
        mapcc = float(xml_results['MAPCC' + bin_number_name])
        nreflections = int(xml_results['NREFLECTIONS' + bin_number_name])
        all_phs_stat_fom += fom_text.format(
            bin_number=bin_number + 1,
            number_bins=total_bins,
            bin_low_res=resolution_low,
            bin_high_res=resolution_high,
            bin_fom=fom,
            num_refl=nreflections)
        all_phs_stat_mapcc += cc_text.format(
            bin_number=bin_number + 1,
            number_bins=total_bins,
            bin_low_res=resolution_low,
            bin_high_res=resolution_high,
            bin_map_cc=mapcc,
            num_refl=nreflections)
    return (all_phs_stat_fom, all_phs_stat_mapcc)
def xmlfile2json(filename):
    """Parse an ISPyB XML file into a JSON-formatted string.

    Fix: uses the standard-library ElementTree parser — the original pulled
    in the third-party lxml package for the same job.
    """
    from xml.etree import ElementTree
    tree = ElementTree.parse(filename)
    xml_dict = __node2json(tree.getroot())
    return json.dumps(xml_dict, indent=4, separators=(',', ':'))
def __node2json(node):
    """Recursively convert an XML element into nested dicts/lists/strings.

    A leaf element becomes its text; repeated child tags collapse into a
    list; an element whose children contribute nothing becomes None.
    """
    if len(node):
        node_dict = {}
        for child in node:
            converted = __node2json(child)
            if child.tag in node_dict:
                if isinstance(node_dict[child.tag], list):
                    node_dict[child.tag].append(converted)
                else:
                    # Second occurrence of this tag: promote the entry to a list.
                    node_dict[child.tag] = [node_dict[child.tag], converted]
            else:
                node_dict[child.tag] = converted
        return node_dict if node_dict else None
    else:
        return node.text
|
from rest_framework.exceptions import APIException
from rest_framework import status
class ConflictError(APIException):
    """409 — the request conflicts with the current state of the resource."""
    status_code = status.HTTP_409_CONFLICT
    default_detail = 'Conflict'
    # Added for consistency: every other exception in this module declares an
    # explicit default_code.
    default_code = 'conflict'
class InternalServiceError(APIException):
    """500 — unexpected server-side failure; the client may retry later."""
    status_code = status.HTTP_500_INTERNAL_SERVER_ERROR
    default_detail = 'Internal server error, try again later.'
    default_code = 'internal_server_error'
class ServiceUnavailableError(APIException):
    """503 — a dependency is temporarily down; the client may retry later."""
    status_code = status.HTTP_503_SERVICE_UNAVAILABLE
    default_detail = 'Service temporarily unavailable, try again later.'
    default_code = 'service_unavailable'
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import glob
import os.path as osp
import cityscapesscripts.helpers.labels as CSLabels
import mmcv
import numpy as np
import pycocotools.mask as maskUtils
from mmengine.fileio import dump
from mmengine.utils import (Timer, mkdir_or_exist, track_parallel_progress,
track_progress)
def collect_files(img_dir, gt_dir):
    """Pair every Cityscapes image under *img_dir* with its ground-truth maps.

    Returns a list of (image, instance-id map, label-id map) path triples.
    Raises AssertionError when a png lacks the sensor suffix or when no
    images are found at all.
    """
    suffix = 'leftImg8bit.png'
    files = []
    for img_file in glob.glob(osp.join(img_dir, '**/*.png')):
        assert img_file.endswith(suffix), img_file
        # The gt files mirror the image layout under gt_dir, with the sensor
        # suffix swapped for the annotation suffixes.
        stem = gt_dir + img_file[len(img_dir):-len(suffix)]
        inst_file = stem + 'gtFine_instanceIds.png'
        # Note that labelIds are not converted to trainId for seg map
        segm_file = stem + 'gtFine_labelIds.png'
        files.append((img_file, inst_file, segm_file))
    assert len(files), f'No images found in {img_dir}'
    print(f'Loaded {len(files)} images from {img_dir}')
    return files
def collect_annotations(files, nproc=1):
    """Load per-image annotation info, in parallel when nproc > 1."""
    print('Loading annotation images')
    if nproc > 1:
        return track_parallel_progress(load_img_info, files, nproc=nproc)
    return track_progress(load_img_info, files)
def load_img_info(files):
    """Build one image's info dict: file names, size and per-instance annos.

    *files* is a (img_file, inst_file, segm_file) triple as produced by
    collect_files.  Returns a dict with an 'anno_info' list of COCO-style
    annotation dicts (RLE segmentation, XYWH bbox, area, iscrowd).
    """
    img_file, inst_file, segm_file = files
    inst_img = mmcv.imread(inst_file, 'unchanged')
    # ids < 24 are stuff labels (filtering them first is about 5% faster)
    unique_inst_ids = np.unique(inst_img[inst_img >= 24])
    anno_info = []
    for inst_id in unique_inst_ids:
        # For non-crowd annotations, inst_id // 1000 is the label_id
        # Crowd annotations have <1000 instance ids
        label_id = inst_id // 1000 if inst_id >= 1000 else inst_id
        label = CSLabels.id2label[label_id]
        # Skip classes with no instances or excluded from evaluation.
        if not label.hasInstances or label.ignoreInEval:
            continue
        category_id = label.id
        iscrowd = int(inst_id < 1000)
        # Fortran-ordered uint8 mask as expected by maskUtils.encode.
        mask = np.asarray(inst_img == inst_id, dtype=np.uint8, order='F')
        mask_rle = maskUtils.encode(mask[:, :, None])[0]
        area = maskUtils.area(mask_rle)
        # convert to COCO style XYWH format
        bbox = maskUtils.toBbox(mask_rle)
        # for json encoding
        mask_rle['counts'] = mask_rle['counts'].decode()
        anno = dict(
            iscrowd=iscrowd,
            category_id=category_id,
            bbox=bbox.tolist(),
            area=area.tolist(),
            segmentation=mask_rle)
        anno_info.append(anno)
    # Cityscapes nests images per city directory; keep it in the file name.
    video_name = osp.basename(osp.dirname(img_file))
    img_info = dict(
        # remove img_prefix for filename
        file_name=osp.join(video_name, osp.basename(img_file)),
        height=inst_img.shape[0],
        width=inst_img.shape[1],
        anno_info=anno_info,
        segm_file=osp.join(video_name, osp.basename(segm_file)))
    return img_info
def cvt_annotations(image_infos, out_json_name):
    """Assemble a COCO-style dict from per-image infos and dump it to JSON.

    Assigns sequential image and annotation ids, fills the category table
    from the evaluated Cityscapes instance classes, and writes the result to
    *out_json_name*.  Returns the assembled dict.
    """
    out_json = dict(images=[], categories=[], annotations=[])
    img_id = 0
    ann_id = 0
    for image_info in image_infos:
        image_info['id'] = img_id
        anno_infos = image_info.pop('anno_info')
        out_json['images'].append(image_info)
        for anno_info in anno_infos:
            anno_info['image_id'] = img_id
            anno_info['id'] = ann_id
            out_json['annotations'].append(anno_info)
            ann_id += 1
        img_id += 1
    # Categories: only instance-level, evaluated Cityscapes classes.
    for label in CSLabels.labels:
        if label.hasInstances and not label.ignoreInEval:
            out_json['categories'].append(dict(id=label.id, name=label.name))
    # COCO convention: omit the annotations key entirely when empty.
    if not out_json['annotations']:
        out_json.pop('annotations')
    dump(out_json, out_json_name)
    return out_json
def parse_args():
    """Parse command-line arguments for the Cityscapes → COCO conversion."""
    parser = argparse.ArgumentParser(
        description='Convert Cityscapes annotations to COCO format')
    parser.add_argument('cityscapes_path', help='cityscapes data path')
    parser.add_argument('--img-dir', default='leftImg8bit', type=str)
    parser.add_argument('--gt-dir', default='gtFine', type=str)
    parser.add_argument('-o', '--out-dir', help='output path')
    parser.add_argument(
        '--nproc', default=1, type=int, help='number of process')
    return parser.parse_args()
def main():
    """Convert each Cityscapes split (train/val/test) into a COCO JSON file."""
    args = parse_args()
    cityscapes_path = args.cityscapes_path
    # Default the output directory to the dataset root.
    out_dir = args.out_dir if args.out_dir else cityscapes_path
    mkdir_or_exist(out_dir)
    img_dir = osp.join(cityscapes_path, args.img_dir)
    gt_dir = osp.join(cityscapes_path, args.gt_dir)
    split_to_json = {
        'train': 'instancesonly_filtered_gtFine_train.json',
        'val': 'instancesonly_filtered_gtFine_val.json',
        'test': 'instancesonly_filtered_gtFine_test.json',
    }
    for split, json_name in split_to_json.items():
        print(f'Converting {split} into {json_name}')
        with Timer(print_tmpl='It took {}s to convert Cityscapes annotation'):
            files = collect_files(
                osp.join(img_dir, split), osp.join(gt_dir, split))
            image_infos = collect_annotations(files, nproc=args.nproc)
            cvt_annotations(image_infos, osp.join(out_dir, json_name))
if __name__ == '__main__':
    # Script entry point.
    main()
|
import os
import pathlib
from unittest.mock import Mock
import cv2
import numpy as np
from liveprint.lp import Projector, WhiteBackground
from liveprint.pose import PosesFactory, Poses, Keypoint, TorsoKeyPoints
from liveprint.utils import Apng
class FakePosesFactory(PosesFactory):
    """Test double: always yields a FakePoses regardless of the input image."""
    def poses(self, image):
        return FakePoses()
class FakePoses(Poses):
    """Test double exposing a single hard-coded torso keypoint set."""
    def torso_keypoints(self, threshold=0.15):
        # The threshold is ignored; the fake keypoints always "pass".
        return iter([FakeTorsoKeypoints()])
class FakeKeypoint(Keypoint):
    """Minimal keypoint stub carrying an id, a position and a confidence."""

    def __init__(self, number, x, y, score):
        self.number = number
        self.x = x
        self.y = y
        self.score = score

    def threshold(self, thresh):
        # A keypoint passes when its confidence reaches the threshold.
        return thresh <= self.score

    def coords(self):
        """Integer (x, y) pixel coordinates."""
        return (int(self.x), int(self.y))
class FakeTorsoKeypoints(TorsoKeyPoints):
    """Fixed torso keypoints roughly matching the test fixture image."""

    def __init__(
        self,
        left_shoulder=(740, 161,),
        right_shoulder=(875, 150,),
        left_hip=(759, 308,),
        right_hip=(862, 311,),
    ):
        # Keypoint ids 5/6 are the shoulders, 11/12 the hips; every fake
        # keypoint gets a fixed 0.6 confidence.
        self._left_shoulder = FakeKeypoint(5, *left_shoulder, 0.6)
        self._right_shoulder = FakeKeypoint(6, *right_shoulder, 0.6)
        self._left_hip = FakeKeypoint(11, *left_hip, 0.6)
        self._right_hip = FakeKeypoint(12, *right_hip, 0.6)

    def left_shoulder(self) -> "Keypoint":
        return self._left_shoulder

    def right_shoulder(self) -> "Keypoint":
        return self._right_shoulder

    def left_hip(self) -> "Keypoint":
        return self._left_hip

    def right_hip(self) -> "Keypoint":
        return self._right_hip
class FakeProjectableRegion:
    """Stand-in projectable region: a constant white image of a fixed size."""

    def __init__(self, output_height=768, output_width=1024):
        self._output_resolution = (output_height, output_width, 3)

    def of(self, webcam_img):
        # The webcam frame is ignored; always return an all-white RGB image.
        white = np.ones(shape=self._output_resolution, dtype=np.uint8)
        return white * 255
def test_projector():
    """Projector composites the APNG frame onto the fake pose's region over a
    white background; the result is compared against a golden image."""
    path = os.path.join(
        pathlib.Path(__file__).parent.absolute(), "..", "resources", "test_image_1.png"
    )
    projectable_region_dims = [768, 1024]
    # Wire the projector entirely with fakes; the webcam frame is a Mock
    # because FakeProjectableRegion ignores its input.
    output_image = Projector(
        WhiteBackground([*projectable_region_dims, 3]),
        FakePosesFactory(),
        FakeProjectableRegion(*projectable_region_dims),
        Apng([cv2.imread(path, cv2.IMREAD_UNCHANGED)]),
    ).project(Mock())
    # Golden-file comparison against the expected composited output.
    expected_image = cv2.imread(
        os.path.join(
            pathlib.Path(__file__).parent.absolute(),
            "..",
            "resources",
            "test_output_1.png",
        ),
        cv2.IMREAD_UNCHANGED,
    )
    np.testing.assert_almost_equal(output_image, expected_image)
class TransparentBackground:
    """Background double contributing no layers (fully transparent)."""
    def layers(self):
        return []
def test_projector_transparent_background():
    """Same pipeline as test_projector but with a layer-less background;
    compared against its own golden image."""
    path = os.path.join(
        pathlib.Path(__file__).parent.absolute(), "..", "resources", "test_image_1.png"
    )
    projectable_region_dims = [768, 1024]
    # `None` stands in for the webcam frame, which the fakes never read.
    output_image = Projector(
        TransparentBackground(),
        FakePosesFactory(),
        FakeProjectableRegion(*projectable_region_dims),
        Apng([cv2.imread(path, cv2.IMREAD_UNCHANGED)]),
    ).project(None)
    # Golden-file comparison against the expected composited output.
    expected_image = cv2.imread(
        os.path.join(
            pathlib.Path(__file__).parent.absolute(),
            "..",
            "resources",
            "test_output_2.png",
        ),
        cv2.IMREAD_UNCHANGED,
    )
    np.testing.assert_almost_equal(output_image, expected_image)
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 3 02:40:45 2020
@author: amk170930
"""
import airsim
import numpy as np
import setup_path
import os
from datetime import datetime
import time
class frequencyTest:
    """Ad-hoc AirSim experiment measuring how often car/sensor state updates.

    NOTE(review): the statements below run at class-definition time, so a
    simulator connection is opened as soon as this module is imported, and
    Freq() takes no self — this is effectively a script wrapped in a class.
    """
    # connect to the AirSim simulator
    client = airsim.CarClient()
    client.confirmConnection()
    client.enableApiControl(True)
    car_controls = airsim.CarControls()
    start = time.time()
    prevTime = start
    car_state = client.getCarState()
    def carStateFreq(self):
        """Estimate getCarState() update frequency over 10 reverse/brake cycles."""
        #Test variables
        revTime = 2 #seconds
        brakeTime = 1 #seconds
        tot = 0
        for idx in range(10):
            #Go reverse
            self.car_controls.throttle = -0.5
            self.car_controls.is_manual_gear = True;
            self.car_controls.manual_gear = -1
            self.car_controls.steering = 0
            self.client.setCarControls(self.car_controls)
            print("Go reverse")
            time.sleep(revTime) # let car drive a bit
            self.car_controls.is_manual_gear = False; # change back gear to auto
            self.car_controls.manual_gear = 0
            # apply brakes
            self.car_controls.brake = 1
            self.client.setCarControls(self.car_controls)
            print("Apply brakes")
            time.sleep(brakeTime) # let car drive a bit
            self.car_controls.brake = 0 #remove brake
            #Time calculations
            currentTime = time.time()
            self.car_state = self.client.getCarState()
            # Subtract the deliberate sleeps so diff approximates the state
            # round-trip time only.
            diff = float((currentTime - self.prevTime - revTime - brakeTime)*1000)#miliseconds
            self.prevTime = currentTime
            freq = 1000/diff #Hertz
            tot = tot + freq
            print("Difference: %f Frequency: %f" % (diff,freq))
        print("\nAverage frequency: %f"% (tot/10.0))
    def Freq():
        """Measure polling frequency of five sensor feeds (no self: call as
        frequencyTest.Freq())."""
        client = airsim.CarClient()
        VehicleClient = airsim.VehicleClient()
        sensor_state = VehicleClient.getImuData()
        car_controls = airsim.CarControls()
        testCases = 10
        revTime = 0#seconds
        time1 = time.time()
        # Sensors 0..4: IMU, barometer, magnetometer, GPS, distance sensor.
        for sensor in range(5):
            idx = 0
            tot = 0
            if sensor == 0:
                print("\n\n\nIMU Data:")
            elif sensor ==1:
                print("\n\n\nBarometer Data:")
            elif sensor == 2:
                print("\n\n\nMagnetometer Data:")
            elif sensor == 3:
                print("\n\n\nGps Data:")
            elif sensor == 4:
                print("\n\n\nDistance Sensor Data:")
            #prevTime = datetime.now().timestamp()
            prevTime = sensor_state.time_stamp/1000000000
            while idx <=testCases:
                #Go reverse
                car_controls.throttle = -0.5
                car_controls.is_manual_gear = True;
                car_controls.manual_gear = -1
                car_controls.steering = 0
                client.setCarControls(car_controls)
                #print("Go reverse")
                time.sleep(revTime) # let car drive a bit
                car_controls.is_manual_gear = False; # change back gear to auto
                car_controls.manual_gear = 0
                if sensor == 0:
                    sensor_state = VehicleClient.getImuData()
                elif sensor ==1:
                    sensor_state = VehicleClient.getBarometerData()
                elif sensor == 2:
                    sensor_state = VehicleClient.getMagnetometerData()
                elif sensor == 3:
                    sensor_state = VehicleClient.getGpsData()
                elif sensor == 4:
                    sensor_state = VehicleClient.getDistanceSensorData()
                #Time calculations
                #currentTime = datetime.now().timestamp()
                #car_state = client.getCarState()
                currentTime = sensor_state.time_stamp/1000000000 #convert nanoseconds to seconds
                diff = (((currentTime - prevTime)-revTime)*1000)#miliseconds
                prevTime = currentTime
                if diff !=0:
                    freq = 1000/diff #Hertz
                    tot = tot + freq
                else:
                    # Identical consecutive timestamps: skip without counting
                    # this iteration.
                    #print("0 difference encountered")
                    continue
                #print("Difference (In miliseconds): %f Frequency (Hz): %f" % (diff,freq))
                idx = idx + 1
            time2 = time.time()
            print("\nAverage frequency: %f"% (float(idx)/(time2-time1)))
#frequencyTest.carStateFreq()
# Run the sensor-frequency experiment when the module is executed.
frequencyTest.Freq()
|
#!/usr/bin/env python3
# NOTE: NEEDS SYNCHRONIZATION FOR MULTITHREADING
import random
import string
from enum import Enum, auto
from abc import ABC,abstractmethod
from typing import (
Dict,
List,
Optional,
Tuple
)
def _random_id(k=None) -> str:
    """Return a random lowercase room identifier.

    Args:
        k: number of letters; defaults to GameGateway.NUM_ROOM_LETTERS,
           preserving the original behavior.

    Fixes: no longer shadows the builtin `ascii`; length is now
    parameterizable (backward-compatible default).
    """
    if k is None:
        k = GameGateway.NUM_ROOM_LETTERS
    letters = string.ascii_lowercase
    return "".join(random.choices(letters, k=k))
class JoinReturnCodes(Enum):
    """Result of attempting to join a room."""
    SUCCESS = auto()
    NAME_IN_USE = auto()
    ROOM_NOT_FOUND = auto()
class GetReturnCodes(Enum):
    """Result of looking up a room or a player within it."""
    SUCCESS = auto()
    ROOM_NOT_FOUND = auto()
    NAME_NOT_FOUND = auto()
class StartReturnCodes(Enum):
    """Result of attempting to start a room's game."""
    SUCCESS = auto()
    TOO_FEW_PLAYERS = auto()
    ALREADY_STARTED = auto()
    ROOM_NOT_FOUND = auto()
class InteractReturnCodes(Enum):
    """Result of a player's in-game interaction with a room."""
    SUCCESS = auto()
    INVALID_DATA = auto()
    WRONG_STATE = auto()
    ROOM_NOT_FOUND = auto()
    PLAYER_NOT_FOUND = auto()
class Player:
    """Placeholder for per-player state; concrete games attach data as needed."""
    def __init__(self):
        pass
class Room(ABC):
    """Abstract game room holding its players keyed by name."""

    def __init__(self):
        self.players: Dict[str, Player] = {}

    def add_player(self, name) -> bool:
        """Register a new player; return False when the name is taken."""
        if name in self.players:
            return False
        self.players[name] = Player()
        return True

    @abstractmethod
    def start(self) -> StartReturnCodes:
        """Begin the game for this room."""
        pass

    @abstractmethod
    def get_room_state(self, player) -> Tuple[InteractReturnCodes, str, str]:
        """Return the room state as seen by *player* (None for a spectator view)."""
        pass

    @abstractmethod
    def submit_data(self, player, data) -> InteractReturnCodes:
        """Apply *data* submitted by *player* to the game."""
        pass
class GameGateway:
    """Routes room-level operations (create/join/start/state/submit) by room id."""

    NUM_ROOM_LETTERS = 4

    def __init__(self):
        self.rooms: Dict[str, Room] = {}

    def room_start(self, room) -> StartReturnCodes:
        """Start the given room's game, if the room exists."""
        if room not in self.rooms:
            return StartReturnCodes.ROOM_NOT_FOUND
        return self.rooms[room].start()

    def new_game(self, room_class) -> str:
        """Create a room of *room_class* and return its generated id.

        NOTE(review): ids are random and not checked for collisions; with
        4 letters a clash is unlikely but possible.
        """
        room = _random_id()
        self.rooms[room] = room_class()
        return room

    def join_room(self, room, name) -> JoinReturnCodes:
        """Add *name* to *room*, reporting unknown rooms and duplicate names."""
        try:
            success = self.rooms[room].add_player(name)
        except KeyError:
            # Fix: only a missing room key means "room not found" — the
            # original bare `except:` also swallowed unrelated errors raised
            # inside add_player.
            return JoinReturnCodes.ROOM_NOT_FOUND
        return JoinReturnCodes.SUCCESS if success else JoinReturnCodes.NAME_IN_USE

    def get_room_state(self, room, name=None) -> Tuple[InteractReturnCodes, str, str]:
        """Fetch room state, optionally scoped to one player."""
        if room not in self.rooms:
            return (InteractReturnCodes.ROOM_NOT_FOUND, '', '')
        if name is None or name in self.rooms[room].players:
            return self.rooms[room].get_room_state(name)
        return (InteractReturnCodes.PLAYER_NOT_FOUND, '', '')

    def submit_data(self, room, name, data) -> InteractReturnCodes:
        """Forward *data* from player *name* to the room's game logic."""
        if room not in self.rooms:
            return InteractReturnCodes.ROOM_NOT_FOUND
        if name not in self.rooms[room].players:
            return InteractReturnCodes.PLAYER_NOT_FOUND
        return self.rooms[room].submit_data(name, data)
|
class Solution:
    """LeetCode 13: convert a roman numeral string to its integer value."""

    # Symbol values; a symbol smaller than its successor is subtractive (IV=4).
    _VALUES = {
        'I': 1,
        'V': 5,
        'X': 10,
        'L': 50,
        'C': 100,
        'D': 500,
        'M': 1000,
    }

    def romanToInt(self, s: str) -> int:
        """Return the integer value of roman numeral *s* (0 for "").

        Each symbol adds its value; when a symbol is smaller than the one
        after it, it counts negatively (e.g. the I in IV).  This replaces the
        original's range(len(s)) loop and its "add then subtract twice"
        correction with a direct single-pass rule.
        """
        total = 0
        for i, ch in enumerate(s):
            value = self._VALUES[ch]
            if i + 1 < len(s) and value < self._VALUES[s[i + 1]]:
                # Subtractive pair: this symbol precedes a larger one.
                total -= value
            else:
                total += value
        return total
|
import numpy as np
from sklearn.linear_model import LogisticRegression
# Training data: hours studied vs. whether the exam was approved (1/0).
# NOTE(review): the 8 between 4.5 and 4.75 breaks the otherwise increasing
# sequence — possibly a typo; confirm against the data source.
hours = np.array([0.5, 0.75, 1, 1.25, 1.5, 1.75, 1.75, 2, 2.25, 2.5, 2.75, 3, 3.25, 3.5, 4, 4.25, 4.5, 8, 4.75, 5, 5.5]).reshape(-1, 1)
approved = np.array([0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1])
# Fit a logistic-regression classifier on the single "hours" feature.
lr = LogisticRegression()
lr.fit(hours, approved)
# Predict labels and class probabilities for unseen study times.
new_hours = np.array([1, 5.22, 4, 3.4, 6, 0]).reshape(-1, 1)
prediction = lr.predict(new_hours)
prob_predictions = lr.predict_proba(new_hours)
np.set_printoptions(3)  # compact 3-decimal printing
print('Prediction data:')
print('New Hours: {}'.format(new_hours.reshape(1,-1)))
print('Approved or not: {}'.format(prediction))
# Column 1 of predict_proba is the probability of the positive class.
print('Probability: {}'.format(prob_predictions[:,1]))
|
End of preview. Expand
in Data Studio
No dataset card yet
- Downloads last month
- 8