/* Excerpt from gensvm_optimize(): progress reporting and the main
 * iterative majorization (IM) loop; elided code is marked with ... */

#ifndef GENSVM_PRINT_ITER
  #define GENSVM_PRINT_ITER 100
#endif

	note("Starting main loop.\n");
	/* ... */
	note("\tn = %i\n", n);
	note("\tm = %i\n", m);
	note("\tK = %i\n", K);
	note("Parameters:\n");
	/* ... */
	note("\tp = %f\n", model->p);

	/* ... */
	Lbar = L + 2.0*model->epsilon*L;

	while ((it < model->max_iter) && (Lbar - L)/L > model->epsilon)
	{
		/* ... */
		if (it % GENSVM_PRINT_ITER == 0)
			note("iter = %li, L = %15.16f, Lbar = %15.16f, "
					"reldiff = %15.16f\n", it, L, Lbar,
					(Lbar - L)/L);
		/* ... */
	}

	if (L > Lbar)
		err("[GenSVM Warning]: Negative step occurred in "
				"majorization.\n");
	if (it >= model->max_iter)
		err("[GenSVM Warning]: maximum number of iterations "
				"reached.\n");

	note("Optimization finished, iter = %li, loss = %15.16f, "
			"rel. diff. = %15.16f\n", it-1, L, (Lbar - L)/L);
/* Excerpt from gensvm_get_loss(): the weighted L-p norm of the Huber
 * hinge errors per instance, plus the squared-norm regularization
 * term over the non-bias rows of V. */
	double value, rowvalue, loss = 0.0;
	/* ... */
	for (i=0; i<n; i++) {
		rowvalue = 0;
		for (j=0; j<K; j++) {
			if (j == (data->y[i]-1))
				continue;
			value = matrix_get(model->H, K, i, j);
			value = pow(value, model->p);
			rowvalue += value;
		}
		rowvalue = pow(rowvalue, 1.0/(model->p));
		rowvalue *= model->rho[i];
		loss += rowvalue;
	}
	loss /= ((double) n);

	value = 0;
	for (i=1; i<m+1; i++) {
		for (j=0; j<K-1; j++) {
			value += pow(matrix_get(model->V, K-1, i, j), 2.0);
		}
	}
	loss += model->lambda * value;
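Reading the two loops together, this excerpt evaluates the regularized GenSVM loss. In the notation of the member descriptions below (H holds the Huber hinge errors h(q), rho the instance weights, V the augmented weight matrix whose first row is the bias):

    L(V) = \frac{1}{n} \sum_{i=1}^{n} \rho_i
           \Big( \sum_{j \ne y_i} h\big(q^{(ij)}\big)^{p} \Big)^{1/p}
           + \lambda \sum_{t=1}^{m} \sum_{c=1}^{K-1} v_{tc}^{2}

The regularization loops start at i = 1, so the bias row of V (row 0) is not penalized.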
	for (i=0; i<m+1; i++) {
		for (j=0; j<K-1; j++) {
			/* ... (loop body over all elements of V) */
		}
	}
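Only the two loop headers survive here; they run over all (m+1) x (K-1) entries of the augmented weight matrix V, which matches gensvm_step_doubling() in the index below ("Use step doubling."). In majorization algorithms, step doubling typically replaces the new iterate V by 2V - Vbar, with Vbar the previous iterate; the sketch below assumes that rule and plain row-major storage, so the function name and body are illustrative rather than taken from the library.

/* Hedged sketch: step doubling as V <- 2*V - Vbar on a row-major
 * (m+1) x (K-1) matrix. The update rule is an assumption, not code
 * recovered from the listing. */
static void step_doubling_sketch(double *V, const double *Vbar,
		long m, long K)
{
	long i, j;
	for (i=0; i<m+1; i++) {
		for (j=0; j<K-1; j++) {
			V[i*(K-1)+j] = 2.0*V[i*(K-1)+j] - Vbar[i*(K-1)+j];
		}
	}
}

A doubled step can overshoot the descent guaranteed by the majorizer, which is one reason the main loop above checks for a negative step after convergence.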
/* Excerpt from gensvm_calculate_huber(): apply the Huber hinge to
 * each scalar error q and store the result in H. */
	for (i=0; i<model->n; i++) {
		for (j=0; j<model->K; j++) {
			/* ... q holds the scalar error for instance i, class j */
			value = 0.0;
			if (q <= -model->kappa) {
				value = 1.0 - q - (model->kappa+1.0)/2.0;
			} else if (q <= 1.0) {
				value = 1.0/(2.0*model->kappa+2.0)*pow(1.0 - q, 2.0);
			}
			matrix_set(model->H, model->K, i, j, value);
		}
	}
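Written out, the branches above implement the Huber-smoothed hinge with smoothing parameter kappa (the omitted else-branch leaves the value at 0):

    h(q) = \begin{cases}
        1 - q - \frac{\kappa + 1}{2}, & q \le -\kappa \\
        \frac{(1 - q)^{2}}{2(\kappa + 1)}, & -\kappa < q \le 1 \\
        0, & q > 1
    \end{cases}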
/* Excerpt from gensvm_calculate_errors(): for every instance i and
 * every class j != y_i, take the inner product of row i of ZV with
 * the corresponding row of the simplex difference matrix UU. */
	double q, *uu_row = NULL;
	/* ... */
	for (i=0; i<n; i++) {
		for (j=0; j<K; j++) {
			if (j == (data->y[i]-1))
				continue;
			uu_row = &model->UU[((data->y[i]-1)*K+j)*(K-1)];
			q = cblas_ddot(K-1, &ZV[i*(K-1)], 1, uu_row, 1);
			/* ... store q as the scalar error for (i, j) */
		}
	}
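The cblas_ddot call computes, for instance i and class j, the dot product of row i of ZV with the row of UU selected by (y_i - 1)*K + j, i.e.

    q^{(ij)} = \sum_{c=0}^{K-2} ZV[\,i(K-1) + c\,] \cdot UU[\,((y_i - 1)K + j)(K-1) + c\,]

so q^{(ij)} pairs the projection of instance i (its row of the Z*V product) with the simplex-vertex difference for the class pair (y_i, j). The index below lists the functions, data members and macros documented for this compilation unit.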
void gensvm_calculate_huber(struct GenModel *model)
Calculate the Huber hinge errors.
long K
number of classes for the workspace
double * H
Huber weighted error matrix.
double epsilon
stopping criterion for the IM algorithm.
void err(const char *fmt,...)
Parse a formatted string and write it to standard error.
double training_error
loss function value after training has finished
void gensvm_simplex(struct GenModel *model)
Generate matrix of simplex vertex coordinates.
void gensvm_get_update(struct GenModel *model, struct GenData *data, struct GenWork *work)
Perform a single step of the majorization algorithm to update V.
long m
number of features for the workspace
double p
parameter for the L-p norm in the loss function
double * UU
simplex difference matrix
void gensvm_calculate_errors(struct GenModel *model, struct GenData *data, double *ZV)
Calculate the scalar errors.
#define matrix_get(M, cols, i, j)
Get element (i, j) of a row-major matrix M with cols columns (see the usage sketch at the end of this page).
double * ZV
n x (K-1) working matrix for the Z * V calculation
void gensvm_free_work(struct GenWork *work)
Free an allocated GenWork instance.
struct GenWork
A structure to hold the GenSVM workspace.
int status
status of the model after training
long gensvm_num_sv(struct GenModel *model)
Calculate the number of support vectors in a model.
struct GenWork * gensvm_init_work(struct GenModel *model)
Initialize the workspace structure.
double * V
augmented weight matrix
#define matrix_add(M, cols, i, j, val)
Add val to element (i, j) of a row-major matrix M with cols columns.
long * y
array of class labels, 1..K
void gensvm_simplex_diff(struct GenModel *model)
Generate the simplex difference matrix.
struct GenData
A structure to represent the data.
struct GenModel
A structure to represent a single GenSVM model.
void gensvm_step_doubling(struct GenModel *model)
Use step doubling.
void gensvm_calculate_ZV(struct GenModel *model, struct GenData *data, double *ZV)
Wrapper around sparse/dense versions of this function.
double gensvm_get_loss(struct GenModel *model, struct GenData *data, struct GenWork *work)
Calculate the current value of the loss function.
long n
number of instances in the dataset
long max_iter
maximum number of iterations of the algorithm
double * rho
vector of instance weights
long elapsed_iter
number of elapsed iterations in training
gensvm_optimize.h
Header file for gensvm_optimize.c.
void gensvm_optimize(struct GenModel *model, struct GenData *data)
The main training loop for GenSVM.
double kappa
parameter for the Huber hinge function
long K
number of classes in the dataset
#define matrix_set(M, cols, i, j, val)
Set element (i, j) of a row-major matrix M with cols columns to val.
#define matrix_mul(M, cols, i, j, val)
Multiply element (i, j) of a row-major matrix M with cols columns by val.
#define GENSVM_PRINT_ITER
Number of main-loop iterations between progress messages (100 by default, see the listing above).
long n
number of instances for the workspace
long m
number of predictor variables in the dataset
double lambda
regularization parameter in the loss function
void note(const char *fmt,...)
Parse a formatted string and write it to the output stream.
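The matrix_get, matrix_set, matrix_add and matrix_mul macros listed above provide element access for dense row-major matrices; their definitions live in GenSVM's matrix header, not in this file. The sketch below uses local stand-in definitions so it is self-contained; they follow the row-major convention implied by the listing (e.g. matrix_get(model->V, K-1, i, j)) but may differ in detail from the real macros.

#include <stdio.h>
#include <stdlib.h>

/* Stand-in definitions for illustration only: dense row-major matrix
 * M with `cols` columns. */
#define matrix_get(M, cols, i, j)      (M)[(i)*(cols)+(j)]
#define matrix_set(M, cols, i, j, val) (M)[(i)*(cols)+(j)] = (val)
#define matrix_add(M, cols, i, j, val) (M)[(i)*(cols)+(j)] += (val)
#define matrix_mul(M, cols, i, j, val) (M)[(i)*(cols)+(j)] *= (val)

int main(void)
{
	long i, j, rows = 2, cols = 3;
	double *M = calloc(rows*cols, sizeof(double));
	if (M == NULL)
		return 1;

	matrix_set(M, cols, 1, 2, 5.0);   /* M[1][2]  = 5 */
	matrix_add(M, cols, 1, 2, 1.0);   /* M[1][2] += 1 */
	matrix_mul(M, cols, 1, 2, 2.0);   /* M[1][2] *= 2 */

	for (i=0; i<rows; i++) {
		for (j=0; j<cols; j++)
			printf("%6.2f ", matrix_get(M, cols, i, j));
		printf("\n");
	}
	free(M);
	return 0;
}

Passing the number of columns explicitly is what lets the same macros address V ((m+1) x (K-1)), H (n x K) and the other matrices used in the listing.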