import time

import torch

from ldm.util import ismap

# NOTE: the original header of this function was truncated in the source; the
# def line below is reconstructed from the parameters referenced in the body.
# `convsample_ddim` is the DDIM sampling helper defined alongside this function
# in the latent-diffusion codebase.
@torch.no_grad()
def make_convolutional_sample(batch, model, custom_steps=None, eta=1.0, quantize_x0=False,
                              custom_shape=None, temperature=1., noise_dropout=0., corrector=None,
                              corrector_kwargs=None, x_T=None, save_intermediate_vid=False,
                              make_progrow=True, ddim_use_x0_pred=False):
    log = dict()

    z, c, x, xrec, xc = model.get_input(batch, model.first_stage_key,
                                        return_first_stage_outputs=True,
                                        force_c_encode=not (hasattr(model, 'split_input_params')
                                                            and model.cond_stage_key == 'coordinates_bbox'),
                                        return_original_cond=True)

    # log every intermediate step only when an intermediate video is requested
    log_every_t = 1 if save_intermediate_vid else None

    if custom_shape is not None:
        z = torch.randn(custom_shape)
        print(f"Generating {custom_shape[0]} samples of shape {custom_shape[1:]}")

    z0 = None

    log["input"] = x
    log["reconstruction"] = xrec

    if ismap(xc):
        log["original_conditioning"] = model.to_rgb(xc)
        if hasattr(model, 'cond_stage_key'):
            log[model.cond_stage_key] = model.to_rgb(xc)
    else:
        log["original_conditioning"] = xc if xc is not None else torch.zeros_like(x)
        if model.cond_stage_model:
            log[model.cond_stage_key] = xc if xc is not None else torch.zeros_like(x)
            if model.cond_stage_key == 'class_label':
                log[model.cond_stage_key] = xc[model.cond_stage_key]

    with model.ema_scope("Plotting"):
        t0 = time.time()
        img_cb = None

        sample, intermediates = convsample_ddim(model, c, steps=custom_steps, shape=z.shape,
                                                eta=eta, quantize_x0=quantize_x0,
                                                img_callback=img_cb, mask=None, x0=z0,
                                                temperature=temperature, noise_dropout=noise_dropout,
                                                score_corrector=corrector, corrector_kwargs=corrector_kwargs,
                                                x_T=x_T, log_every_t=log_every_t)
        t1 = time.time()

        if ddim_use_x0_pred:
            # use the sampler's final x0 prediction instead of the last latent
            sample = intermediates['pred_x0'][-1]

    x_sample = model.decode_first_stage(sample)

    try:
        # a non-quantized decode is only available for first stages with a
        # quantization layer (e.g. VQ autoencoders)
        x_sample_noquant = model.decode_first_stage(sample, force_not_quantize=True)
        log["sample_noquant"] = x_sample_noquant
        log["sample_diff"] = torch.abs(x_sample_noquant - x_sample)
    except Exception:
        pass

    log["sample"] = x_sample
    log["time"] = t1 - t0
    return log
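# Hypothetical usage sketch (not from the original file): assumes `model` is a
# loaded latent-diffusion model and `batch` is one batch from its data loader;
# the keyword values are illustrative only.
#
#   logs = make_convolutional_sample(batch, model, custom_steps=50, eta=1.0)
#   samples = logs["sample"]              # decoded images, shape (B, C, H, W)
#   print(f"sampling took {logs['time']:.2f}s")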
# <FILESEP>
from huffmax import Huffmax
from keras.layers import Dense, Input
from keras.models import Sequential, Model
import numpy as np
import time

# Benchmark: compare wall-clock training time of a full softmax output layer
# against the Huffmax (hierarchical softmax) layer on random data.
batch_size = 32
input_dim = 100
nb_classes = 100000
nb_samples = 10000

times = {}

X = np.random.random((nb_samples, input_dim))
# Huffmax consumes integer class indices as targets; the softmax baseline
# needs full one-hot vectors.
Y_huffmax = np.random.randint(0, nb_classes, size=(nb_samples, 1))
Y_softmax = []
for _ in range(nb_samples):
    oh = np.zeros(nb_classes)
    oh[np.random.randint(0, nb_classes)] = 1
    Y_softmax += [oh]
Y_softmax = np.array(Y_softmax)
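# An equivalent vectorized construction of the one-hot targets (a sketch; like
# the loop above it draws fresh random labels rather than reusing Y_huffmax):
#   labels = np.random.randint(0, nb_classes, size=nb_samples)
#   Y_softmax = np.zeros((nb_samples, nb_classes))
#   Y_softmax[np.arange(nb_samples), labels] = 1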
# Full-softmax baseline. `output_dim` is the Keras 1 Dense API that huffmax
# targets; the Keras 2 spelling would be Dense(nb_classes, input_dim=input_dim, ...).
softmax_model = Sequential()
softmax_model.add(Dense(input_dim=input_dim, output_dim=nb_classes, activation='softmax'))
softmax_model.compile(loss='mse', optimizer='sgd')

# Warm-up prediction so one-time graph/weight initialization is excluded
# from the timing below.
softmax_model.predict(X[:1])

start_time = time.time()
softmax_model.fit(X, Y_softmax, batch_size=batch_size)
end_time = time.time()
times['Softmax'] = end_time - start_time
del softmax_model
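# (Sketch, not in the original:) once both entries of `times` are filled,
# the comparison could be reported with:
#   for name, t in times.items():
#       print('%s: %.2fs' % (name, t))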
# Huffmax benchmark, run once for each mode setting.
for mode in [0, 1]:
    vector = Input((input_dim,))