SphinxBase  5prealpha
ngram_model_set.c
Go to the documentation of this file.
1 /* -*- c-basic-offset: 4; indent-tabs-mode: nil -*- */
2 /* ====================================================================
3  * Copyright (c) 2008 Carnegie Mellon University. All rights
4  * reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  *
10  * 1. Redistributions of source code must retain the above copyright
11  * notice, this list of conditions and the following disclaimer.
12  *
13  * 2. Redistributions in binary form must reproduce the above copyright
14  * notice, this list of conditions and the following disclaimer in
15  * the documentation and/or other materials provided with the
16  * distribution.
17  *
18  * This work was supported in part by funding from the Defense Advanced
19  * Research Projects Agency and the National Science Foundation of the
20  * United States of America, and the CMU Sphinx Speech Consortium.
21  *
22  * THIS SOFTWARE IS PROVIDED BY CARNEGIE MELLON UNIVERSITY ``AS IS'' AND
23  * ANY EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
25  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY
26  * NOR ITS EMPLOYEES BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
28  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
32  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  *
34  * ====================================================================
35  *
36  */
42 #include <string.h>
43 #include <stdlib.h>
44 
45 #include "sphinxbase/err.h"
46 #include "sphinxbase/ckd_alloc.h"
47 #include "sphinxbase/strfuncs.h"
48 #include "sphinxbase/filename.h"
49 
50 #include "ngram_model_set.h"
51 
52 static ngram_funcs_t ngram_model_set_funcs;
53 
/* qsort() comparator for the merged unigram array: "<UNK>" always
 * sorts to the front (so it receives word ID 0); every other pair
 * of words is ordered by plain strcmp(). */
static int
my_compare(const void *a, const void *b)
{
    const char *wa = *(char *const *) a;
    const char *wb = *(char *const *) b;

    if (strcmp(wa, "<UNK>") == 0)
        return -1;
    if (strcmp(wb, "<UNK>") == 0)
        return 1;
    return strcmp(wa, wb);
}
65 
66 static void
67 build_widmap(ngram_model_t * base, logmath_t * lmath, int32 n)
68 {
69  ngram_model_set_t *set = (ngram_model_set_t *) base;
70  ngram_model_t **models = set->lms;
71  hash_table_t *vocab;
72  glist_t hlist;
73  gnode_t *gn;
74  int32 i;
75 
76  /* Construct a merged vocabulary and a set of word-ID mappings. */
77  vocab = hash_table_new(models[0]->n_words, FALSE);
78  /* Create the set of merged words. */
79  for (i = 0; i < set->n_models; ++i) {
80  int32 j;
81  for (j = 0; j < models[i]->n_words; ++j) {
82  /* Ignore collisions. */
83  (void) hash_table_enter_int32(vocab, models[i]->word_str[j],
84  j);
85  }
86  }
87  /* Create the array of words, then sort it. */
88  if (hash_table_lookup(vocab, "<UNK>", NULL) != 0)
89  (void) hash_table_enter_int32(vocab, "<UNK>", 0);
90  /* Now we know the number of unigrams, initialize the base model. */
91  ngram_model_init(base, &ngram_model_set_funcs, lmath, n,
92  hash_table_inuse(vocab));
93  base->writable = FALSE; /* We will reuse the pointers from the submodels. */
94  i = 0;
95  hlist = hash_table_tolist(vocab, NULL);
96  for (gn = hlist; gn; gn = gnode_next(gn)) {
97  hash_entry_t *ent = gnode_ptr(gn);
98  base->word_str[i++] = (char *) ent->key;
99  }
100  glist_free(hlist);
101  qsort(base->word_str, base->n_words, sizeof(*base->word_str),
102  my_compare);
103 
104  /* Now create the word ID mappings. */
105  if (set->widmap)
106  ckd_free_2d((void **) set->widmap);
107  set->widmap = (int32 **) ckd_calloc_2d(base->n_words, set->n_models,
108  sizeof(**set->widmap));
109  for (i = 0; i < base->n_words; ++i) {
110  int32 j;
111  /* Also create the master wid mapping. */
112  (void) hash_table_enter_int32(base->wid, base->word_str[i], i);
113  /* printf("%s: %d => ", base->word_str[i], i); */
114  for (j = 0; j < set->n_models; ++j) {
115  set->widmap[i][j] = ngram_wid(models[j], base->word_str[i]);
116  /* printf("%d ", set->widmap[i][j]); */
117  }
118  /* printf("\n"); */
119  }
120  hash_table_free(vocab);
121 }
122 
125  ngram_model_t ** models,
126  char **names, const float32 * weights, int32 n_models)
127 {
128  ngram_model_set_t *model;
129  ngram_model_t *base;
130  logmath_t *lmath;
131  int32 i, n;
132 
133  if (n_models == 0) /* WTF */
134  return NULL;
135 
136  /* Do consistency checking on the models. They must all use the
137  * same logbase and shift. */
138  lmath = models[0]->lmath;
139  for (i = 1; i < n_models; ++i) {
140  if (logmath_get_base(models[i]->lmath) != logmath_get_base(lmath)
141  || logmath_get_shift(models[i]->lmath) !=
142  logmath_get_shift(lmath)) {
143  E_ERROR
144  ("Log-math parameters don't match, will not create LM set\n");
145  return NULL;
146  }
147  }
148 
149  /* Allocate the combined model, initialize it. */
150  model = ckd_calloc(1, sizeof(*model));
151  base = &model->base;
152  model->n_models = n_models;
153  model->lms = ckd_calloc(n_models, sizeof(*model->lms));
154  model->names = ckd_calloc(n_models, sizeof(*model->names));
155  /* Initialize weights to a uniform distribution */
156  model->lweights = ckd_calloc(n_models, sizeof(*model->lweights));
157  {
158  int32 uniform = logmath_log(lmath, 1.0 / n_models);
159  for (i = 0; i < n_models; ++i)
160  model->lweights[i] = uniform;
161  }
162  /* Default to interpolate if weights were given. */
163  if (weights)
164  model->cur = -1;
165 
166  n = 0;
167  for (i = 0; i < n_models; ++i) {
168  model->lms[i] = ngram_model_retain(models[i]);
169  model->names[i] = ckd_salloc(names[i]);
170  if (weights)
171  model->lweights[i] = logmath_log(lmath, weights[i]);
172  /* N is the maximum of all merged models. */
173  if (models[i]->n > n)
174  n = models[i]->n;
175  }
176  /* Allocate the history mapping table. */
177  model->maphist = ckd_calloc(n - 1, sizeof(*model->maphist));
178 
179  /* Now build the word-ID mapping and merged vocabulary. */
180  build_widmap(base, lmath, n);
181  return base;
182 }
183 
186  const char *lmctlfile, logmath_t * lmath)
187 {
188  FILE *ctlfp;
189  glist_t lms = NULL;
190  glist_t lmnames = NULL;
191  __BIGSTACKVARIABLE__ char str[1024];
192  ngram_model_t *set = NULL;
193  hash_table_t *classes;
194  char *basedir, *c;
195 
196  /* Read all the class definition files to accumulate a mapping of
197  * classnames to definitions. */
198  classes = hash_table_new(0, FALSE);
199  if ((ctlfp = fopen(lmctlfile, "r")) == NULL) {
200  E_ERROR_SYSTEM("Failed to open %s", lmctlfile);
201  return NULL;
202  }
203 
204  /* Try to find the base directory to append to relative paths in
205  * the lmctl file. */
206  if ((c = strrchr(lmctlfile, '/')) || (c = strrchr(lmctlfile, '\\'))) {
207  /* Include the trailing slash. */
208  basedir = ckd_calloc(c - lmctlfile + 2, 1);
209  memcpy(basedir, lmctlfile, c - lmctlfile + 1);
210  }
211  else {
212  basedir = NULL;
213  }
214  E_INFO("Reading LM control file '%s'\n", lmctlfile);
215  if (basedir)
216  E_INFO("Will prepend '%s' to unqualified paths\n", basedir);
217 
218  if (fscanf(ctlfp, "%1023s", str) == 1) {
219  if (strcmp(str, "{") == 0) {
220  /* Load LMclass files */
221  while ((fscanf(ctlfp, "%1023s", str) == 1)
222  && (strcmp(str, "}") != 0)) {
223  char *deffile;
224  if (basedir && !path_is_absolute(str))
225  deffile = string_join(basedir, str, NULL);
226  else
227  deffile = ckd_salloc(str);
228  E_INFO("Reading classdef from '%s'\n", deffile);
229  if (read_classdef_file(classes, deffile) < 0) {
230  ckd_free(deffile);
231  goto error_out;
232  }
233  ckd_free(deffile);
234  }
235 
236  if (strcmp(str, "}") != 0) {
237  E_ERROR("Unexpected EOF in %s\n", lmctlfile);
238  goto error_out;
239  }
240 
241  /* This might be the first LM name. */
242  if (fscanf(ctlfp, "%1023s", str) != 1)
243  str[0] = '\0';
244  }
245  }
246  else
247  str[0] = '\0';
248 
249  /* Read in one LM at a time and add classes to them as necessary. */
250  while (str[0] != '\0') {
251  char *lmfile;
252  ngram_model_t *lm;
253 
254  if (basedir && str[0] != '/' && str[0] != '\\')
255  lmfile = string_join(basedir, str, NULL);
256  else
257  lmfile = ckd_salloc(str);
258  E_INFO("Reading lm from '%s'\n", lmfile);
259  lm = ngram_model_read(config, lmfile, NGRAM_AUTO, lmath);
260  if (lm == NULL) {
261  ckd_free(lmfile);
262  goto error_out;
263  }
264  if (fscanf(ctlfp, "%1023s", str) != 1) {
265  E_ERROR("LMname missing after LMFileName '%s'\n", lmfile);
266  ckd_free(lmfile);
267  goto error_out;
268  }
269  ckd_free(lmfile);
270  lms = glist_add_ptr(lms, lm);
271  lmnames = glist_add_ptr(lmnames, ckd_salloc(str));
272 
273  if (fscanf(ctlfp, "%1023s", str) == 1) {
274  if (strcmp(str, "{") == 0) {
275  /* LM uses classes; read their names */
276  while ((fscanf(ctlfp, "%1023s", str) == 1) &&
277  (strcmp(str, "}") != 0)) {
278  void *val;
279  classdef_t *classdef;
280 
281  if (hash_table_lookup(classes, str, &val) == -1) {
282  E_ERROR("Unknown class %s in control file\n", str);
283  goto error_out;
284  }
285  classdef = val;
286  if (ngram_model_add_class(lm, str, 1.0,
287  classdef->words,
288  classdef->weights,
289  classdef->n_words) < 0) {
290  goto error_out;
291  }
292  E_INFO("Added class %s containing %d words\n",
293  str, classdef->n_words);
294  }
295  if (strcmp(str, "}") != 0) {
296  E_ERROR("Unexpected EOF in %s\n", lmctlfile);
297  goto error_out;
298  }
299  if (fscanf(ctlfp, "%1023s", str) != 1)
300  str[0] = '\0';
301  }
302  }
303  else
304  str[0] = '\0';
305  }
306  fclose(ctlfp);
307 
308  /* Now construct arrays out of lms and lmnames, and build an
309  * ngram_model_set. */
310  lms = glist_reverse(lms);
311  lmnames = glist_reverse(lmnames);
312  {
313  int32 n_models;
314  ngram_model_t **lm_array;
315  char **name_array;
316  gnode_t *lm_node, *name_node;
317  int32 i;
318 
319  n_models = glist_count(lms);
320  lm_array = ckd_calloc(n_models, sizeof(*lm_array));
321  name_array = ckd_calloc(n_models, sizeof(*name_array));
322  lm_node = lms;
323  name_node = lmnames;
324  for (i = 0; i < n_models; ++i) {
325  lm_array[i] = gnode_ptr(lm_node);
326  name_array[i] = gnode_ptr(name_node);
327  lm_node = gnode_next(lm_node);
328  name_node = gnode_next(name_node);
329  }
330  set = ngram_model_set_init(config, lm_array, name_array,
331  NULL, n_models);
332 
333  for (i = 0; i < n_models; ++i) {
334  ngram_model_free(lm_array[i]);
335  }
336  ckd_free(lm_array);
337  ckd_free(name_array);
338  }
339  error_out:
340  {
341  gnode_t *gn;
342  glist_t hlist;
343 
344  if (set == NULL) {
345  for (gn = lms; gn; gn = gnode_next(gn)) {
347  }
348  }
349  glist_free(lms);
350  for (gn = lmnames; gn; gn = gnode_next(gn)) {
351  ckd_free(gnode_ptr(gn));
352  }
353  glist_free(lmnames);
354  hlist = hash_table_tolist(classes, NULL);
355  for (gn = hlist; gn; gn = gnode_next(gn)) {
356  hash_entry_t *he = gnode_ptr(gn);
357  ckd_free((char *) he->key);
358  classdef_free(he->val);
359  }
360  glist_free(hlist);
361  hash_table_free(classes);
362  ckd_free(basedir);
363  }
364  return set;
365 }
366 
367 int32
369 {
370  ngram_model_set_t *set = (ngram_model_set_t *) base;
371  return set->n_models;
372 }
373 
376 {
377  ngram_model_set_t *set = (ngram_model_set_t *) base;
379 
380  if (set == NULL || set->n_models == 0)
381  return NULL;
382  itor = ckd_calloc(1, sizeof(*itor));
383  itor->set = set;
384  return itor;
385 }
386 
389 {
390  if (++itor->cur == itor->set->n_models) {
392  return NULL;
393  }
394  return itor;
395 }
396 
397 void
399 {
400  ckd_free(itor);
401 }
402 
405  char const **lmname)
406 {
407  if (lmname)
408  *lmname = itor->set->names[itor->cur];
409  return itor->set->lms[itor->cur];
410 }
411 
413 ngram_model_set_lookup(ngram_model_t * base, const char *name)
414 {
415  ngram_model_set_t *set = (ngram_model_set_t *) base;
416  int32 i;
417 
418  if (name == NULL) {
419  if (set->cur == -1)
420  return NULL;
421  else
422  return set->lms[set->cur];
423  }
424 
425  /* There probably won't be very many submodels. */
426  for (i = 0; i < set->n_models; ++i)
427  if (0 == strcmp(set->names[i], name))
428  break;
429  if (i == set->n_models)
430  return NULL;
431  return set->lms[i];
432 }
433 
435 ngram_model_set_select(ngram_model_t * base, const char *name)
436 {
437  ngram_model_set_t *set = (ngram_model_set_t *) base;
438  int32 i;
439 
440  /* There probably won't be very many submodels. */
441  for (i = 0; i < set->n_models; ++i)
442  if (0 == strcmp(set->names[i], name))
443  break;
444  if (i == set->n_models)
445  return NULL;
446  set->cur = i;
447  return set->lms[set->cur];
448 }
449 
450 const char *
452 {
453  ngram_model_set_t *set = (ngram_model_set_t *) base;
454 
455  if (set->cur == -1)
456  return NULL;
457  else
458  return set->names[set->cur];
459 }
460 
461 int32
463 {
464  ngram_model_set_t *set = (ngram_model_set_t *) base;
465 
466  if (set->cur == -1 || set_wid >= base->n_words)
467  return NGRAM_INVALID_WID;
468  else
469  return set->widmap[set_wid][set->cur];
470 }
471 
472 int32
474 {
475  ngram_model_set_t *set = (ngram_model_set_t *) base;
476 
477  if (set_wid >= base->n_words)
478  return FALSE;
479  else if (set->cur == -1) {
480  int32 i;
481  for (i = 0; i < set->n_models; ++i) {
482  if (set->widmap[set_wid][i] != ngram_unknown_wid(set->lms[i]))
483  return TRUE;
484  }
485  return FALSE;
486  }
487  else
488  return (set->widmap[set_wid][set->cur]
489  != ngram_unknown_wid(set->lms[set->cur]));
490 }
491 
494  const char **names, const float32 * weights)
495 {
496  ngram_model_set_t *set = (ngram_model_set_t *) base;
497 
498  /* If we have a set of weights here, then set them. */
499  if (names && weights) {
500  int32 i, j;
501 
502  /* We hope there aren't many models. */
503  for (i = 0; i < set->n_models; ++i) {
504  for (j = 0; j < set->n_models; ++j)
505  if (0 == strcmp(names[i], set->names[j]))
506  break;
507  if (j == set->n_models) {
508  E_ERROR("Unknown LM name %s\n", names[i]);
509  return NULL;
510  }
511  set->lweights[j] = logmath_log(base->lmath, weights[i]);
512  }
513  }
514  else if (weights) {
515  memcpy(set->lweights, weights,
516  set->n_models * sizeof(*set->lweights));
517  }
518  /* Otherwise just enable existing weights. */
519  set->cur = -1;
520  return base;
521 }
522 
525  ngram_model_t * model,
526  const char *name, float32 weight, int reuse_widmap)
527 {
528  ngram_model_set_t *set = (ngram_model_set_t *) base;
529  float32 fprob;
530  int32 scale, i;
531 
532  /* Add it to the array of lms. */
533  ++set->n_models;
534  set->lms = ckd_realloc(set->lms, set->n_models * sizeof(*set->lms));
535  set->lms[set->n_models - 1] = model;
536  set->names =
537  ckd_realloc(set->names, set->n_models * sizeof(*set->names));
538  set->names[set->n_models - 1] = ckd_salloc(name);
539  /* Expand the history mapping table if necessary. */
540  if (model->n > base->n) {
541  base->n = model->n;
542  set->maphist = ckd_realloc(set->maphist,
543  (model->n - 1) * sizeof(*set->maphist));
544  }
545 
546  /* Renormalize the interpolation weights. */
547  fprob = weight * 1.0f / set->n_models;
548  set->lweights = ckd_realloc(set->lweights,
549  set->n_models * sizeof(*set->lweights));
550  set->lweights[set->n_models - 1] = logmath_log(base->lmath, fprob);
551  /* Now normalize everything else to fit it in. This is
552  * accomplished by simply scaling all the other probabilities
553  * by (1-fprob). */
554  scale = logmath_log(base->lmath, 1.0 - fprob);
555  for (i = 0; i < set->n_models - 1; ++i)
556  set->lweights[i] += scale;
557 
558  /* Reuse the old word ID mapping if requested. */
559  if (reuse_widmap) {
560  int32 **new_widmap;
561 
562  /* Tack another column onto the widmap array. */
563  new_widmap = (int32 **) ckd_calloc_2d(base->n_words, set->n_models,
564  sizeof(**new_widmap));
565  for (i = 0; i < base->n_words; ++i) {
566  /* Copy all the existing mappings. */
567  memcpy(new_widmap[i], set->widmap[i],
568  (set->n_models - 1) * sizeof(**new_widmap));
569  /* Create the new mapping. */
570  new_widmap[i][set->n_models - 1] =
571  ngram_wid(model, base->word_str[i]);
572  }
573  ckd_free_2d((void **) set->widmap);
574  set->widmap = new_widmap;
575  }
576  else {
577  build_widmap(base, base->lmath, base->n);
578  }
579  return model;
580 }
581 
584  const char *name, int reuse_widmap)
585 {
586  ngram_model_set_t *set = (ngram_model_set_t *) base;
587  ngram_model_t *submodel;
588  int32 lmidx, scale, n, i;
589  float32 fprob;
590 
591  for (lmidx = 0; lmidx < set->n_models; ++lmidx)
592  if (0 == strcmp(name, set->names[lmidx]))
593  break;
594  if (lmidx == set->n_models)
595  return NULL;
596  submodel = set->lms[lmidx];
597 
598  /* Renormalize the interpolation weights by scaling them by
599  * 1/(1-fprob) */
600  fprob = (float32) logmath_exp(base->lmath, set->lweights[lmidx]);
601  scale = logmath_log(base->lmath, 1.0 - fprob);
602 
603  /* Remove it from the array of lms, renormalize remaining weights,
604  * and recalcluate n. */
605  --set->n_models;
606  n = 0;
607  ckd_free(set->names[lmidx]);
608  set->names[lmidx] = NULL;
609  for (i = 0; i < set->n_models; ++i) {
610  if (i >= lmidx) {
611  set->lms[i] = set->lms[i + 1];
612  set->names[i] = set->names[i + 1];
613  set->lweights[i] = set->lweights[i + 1];
614  }
615  set->lweights[i] -= scale;
616  if (set->lms[i]->n > n)
617  n = set->lms[i]->n;
618  }
619  /* There's no need to shrink these arrays. */
620  set->lms[set->n_models] = NULL;
621  set->lweights[set->n_models] = base->log_zero;
622  /* No need to shrink maphist either. */
623 
624  /* Reuse the existing word ID mapping if requested. */
625  if (reuse_widmap) {
626  /* Just go through and shrink each row. */
627  for (i = 0; i < base->n_words; ++i) {
628  memmove(set->widmap[i] + lmidx, set->widmap[i] + lmidx + 1,
629  (set->n_models - lmidx) * sizeof(**set->widmap));
630  }
631  }
632  else {
633  build_widmap(base, base->lmath, n);
634  }
635  return submodel;
636 }
637 
638 void
640  const char **words, int32 n_words)
641 {
642  ngram_model_set_t *set = (ngram_model_set_t *) base;
643  int32 i;
644 
645  /* Recreate the word mapping. */
646  if (base->writable) {
647  for (i = 0; i < base->n_words; ++i) {
648  ckd_free(base->word_str[i]);
649  }
650  }
651  ckd_free(base->word_str);
652  ckd_free_2d((void **) set->widmap);
653  base->writable = TRUE;
654  base->n_words = base->n_1g_alloc = n_words;
655  base->word_str = ckd_calloc(n_words, sizeof(*base->word_str));
656  set->widmap =
657  (int32 **) ckd_calloc_2d(n_words, set->n_models,
658  sizeof(**set->widmap));
659  hash_table_empty(base->wid);
660  for (i = 0; i < n_words; ++i) {
661  int32 j;
662  base->word_str[i] = ckd_salloc(words[i]);
663  (void) hash_table_enter_int32(base->wid, base->word_str[i], i);
664  for (j = 0; j < set->n_models; ++j) {
665  set->widmap[i][j] = ngram_wid(set->lms[j], base->word_str[i]);
666  }
667  }
668 }
669 
670 static int
671 ngram_model_set_apply_weights(ngram_model_t * base, float32 lw,
672  float32 wip)
673 {
674  ngram_model_set_t *set = (ngram_model_set_t *) base;
675  int32 i;
676 
677  /* Apply weights to each sub-model. */
678  for (i = 0; i < set->n_models; ++i)
679  ngram_model_apply_weights(set->lms[i], lw, wip);
680  return 0;
681 }
682 
683 static int32
684 ngram_model_set_score(ngram_model_t * base, int32 wid,
685  int32 * history, int32 n_hist, int32 * n_used)
686 {
687  ngram_model_set_t *set = (ngram_model_set_t *) base;
688  int32 mapwid;
689  int32 score;
690  int32 i;
691 
692  /* Truncate the history. */
693  if (n_hist > base->n - 1)
694  n_hist = base->n - 1;
695 
696  /* Interpolate if there is no current. */
697  if (set->cur == -1) {
698  score = base->log_zero;
699  for (i = 0; i < set->n_models; ++i) {
700  int32 j;
701  /* Map word and history IDs for each model. */
702  mapwid = set->widmap[wid][i];
703  for (j = 0; j < n_hist; ++j) {
704  if (history[j] == NGRAM_INVALID_WID)
705  set->maphist[j] = NGRAM_INVALID_WID;
706  else
707  set->maphist[j] = set->widmap[history[j]][i];
708  }
709  score = logmath_add(base->lmath, score,
710  set->lweights[i] +
711  ngram_ng_score(set->lms[i],
712  mapwid, set->maphist,
713  n_hist, n_used));
714  }
715  }
716  else {
717  int32 j;
718  /* Map word and history IDs (FIXME: do this in a function?) */
719  mapwid = set->widmap[wid][set->cur];
720  for (j = 0; j < n_hist; ++j) {
721  if (history[j] == NGRAM_INVALID_WID)
722  set->maphist[j] = NGRAM_INVALID_WID;
723  else
724  set->maphist[j] = set->widmap[history[j]][set->cur];
725  }
726  score = ngram_ng_score(set->lms[set->cur],
727  mapwid, set->maphist, n_hist, n_used);
728  }
729 
730  return score;
731 }
732 
733 static int32
734 ngram_model_set_raw_score(ngram_model_t * base, int32 wid,
735  int32 * history, int32 n_hist, int32 * n_used)
736 {
737  ngram_model_set_t *set = (ngram_model_set_t *) base;
738  int32 mapwid;
739  int32 score;
740  int32 i;
741 
742  /* Truncate the history. */
743  if (n_hist > base->n - 1)
744  n_hist = base->n - 1;
745 
746  /* Interpolate if there is no current. */
747  if (set->cur == -1) {
748  score = base->log_zero;
749  for (i = 0; i < set->n_models; ++i) {
750  int32 j;
751  /* Map word and history IDs for each model. */
752  mapwid = set->widmap[wid][i];
753  for (j = 0; j < n_hist; ++j) {
754  if (history[j] == NGRAM_INVALID_WID)
755  set->maphist[j] = NGRAM_INVALID_WID;
756  else
757  set->maphist[j] = set->widmap[history[j]][i];
758  }
759  score = logmath_add(base->lmath, score,
760  set->lweights[i] +
761  ngram_ng_prob(set->lms[i],
762  mapwid, set->maphist, n_hist,
763  n_used));
764  }
765  }
766  else {
767  int32 j;
768  /* Map word and history IDs (FIXME: do this in a function?) */
769  mapwid = set->widmap[wid][set->cur];
770  for (j = 0; j < n_hist; ++j) {
771  if (history[j] == NGRAM_INVALID_WID)
772  set->maphist[j] = NGRAM_INVALID_WID;
773  else
774  set->maphist[j] = set->widmap[history[j]][set->cur];
775  }
776  score = ngram_ng_prob(set->lms[set->cur],
777  mapwid, set->maphist, n_hist, n_used);
778  }
779 
780  return score;
781 }
782 
783 static int32
784 ngram_model_set_add_ug(ngram_model_t * base, int32 wid, int32 lweight)
785 {
786  ngram_model_set_t *set = (ngram_model_set_t *) base;
787  int32 *newwid;
788  int32 i, prob;
789 
790  /* At this point the word has already been added to the master
791  model and we have a new word ID for it. Add it to active
792  submodels and track the word IDs. */
793  newwid = ckd_calloc(set->n_models, sizeof(*newwid));
794  prob = base->log_zero;
795  for (i = 0; i < set->n_models; ++i) {
796  int32 wprob, n_hist;
797 
798  /* Only add to active models. */
799  if (set->cur == -1 || set->cur == i) {
800  /* Did this word already exist? */
801  newwid[i] = ngram_wid(set->lms[i], base->word_str[wid]);
802  if (newwid[i] == NGRAM_INVALID_WID) {
803  /* Add it to the submodel. */
804  newwid[i] =
805  ngram_model_add_word(set->lms[i], base->word_str[wid],
806  (float32) logmath_exp(base->lmath,
807  lweight));
808  if (newwid[i] == NGRAM_INVALID_WID) {
809  ckd_free(newwid);
810  return base->log_zero;
811  }
812  }
813  /* Now get the unigram probability for the new word and either
814  * interpolate it or use it (if this is the current model). */
815  wprob =
816  ngram_ng_prob(set->lms[i], newwid[i], NULL, 0, &n_hist);
817  if (set->cur == i)
818  prob = wprob;
819  else if (set->cur == -1)
820  prob =
821  logmath_add(base->lmath, prob,
822  set->lweights[i] + wprob);
823  }
824  else {
825  newwid[i] = NGRAM_INVALID_WID;
826  }
827  }
828  /* Okay we have the word IDs for this in all the submodels. Now
829  do some complicated memory mangling to add this to the
830  widmap. */
831  set->widmap =
832  ckd_realloc(set->widmap, base->n_words * sizeof(*set->widmap));
833  set->widmap[0] =
834  ckd_realloc(set->widmap[0],
835  base->n_words * set->n_models * sizeof(**set->widmap));
836  for (i = 0; i < base->n_words; ++i)
837  set->widmap[i] = set->widmap[0] + i * set->n_models;
838  memcpy(set->widmap[wid], newwid, set->n_models * sizeof(*newwid));
839  ckd_free(newwid);
840  return prob;
841 }
842 
843 static void
844 ngram_model_set_free(ngram_model_t * base)
845 {
846  ngram_model_set_t *set = (ngram_model_set_t *) base;
847  int32 i;
848 
849  for (i = 0; i < set->n_models; ++i)
850  ngram_model_free(set->lms[i]);
851  ckd_free(set->lms);
852  for (i = 0; i < set->n_models; ++i)
853  ckd_free(set->names[i]);
854  ckd_free(set->names);
855  ckd_free(set->lweights);
856  ckd_free(set->maphist);
857  ckd_free_2d((void **) set->widmap);
858 }
859 
860 static ngram_funcs_t ngram_model_set_funcs = {
861  ngram_model_set_free, /* free */
862  ngram_model_set_apply_weights, /* apply_weights */
863  ngram_model_set_score, /* score */
864  ngram_model_set_raw_score, /* raw_score */
865  ngram_model_set_add_ug, /* add_ug */
866 };
#define E_ERROR_SYSTEM(...)
Print error text; call perror("").
Definition: err.h:99
SPHINXBASE_EXPORT int32 ngram_ng_prob(ngram_model_t *model, int32 wid, int32 *history, int32 n_hist, int32 *n_used)
Quick "raw" probability lookup for a general N-Gram.
Definition: ngram_model.c:454
SPHINXBASE_EXPORT ngram_model_t * ngram_model_set_init(cmd_ln_t *config, ngram_model_t **models, char **names, const float32 *weights, int32 n_models)
Create a set of language models sharing a common space of word IDs.
char ** names
Names for language models.
SPHINXBASE_EXPORT ngram_model_t * ngram_model_read(cmd_ln_t *config, const char *file_name, ngram_file_type_t file_type, logmath_t *lmath)
Read an N-Gram model from a file on disk.
Definition: ngram_model.c:124
Miscellaneous useful string functions.
#define E_INFO(...)
Print logging information to standard error stream.
Definition: err.h:114
SPHINXBASE_EXPORT int32 hash_table_lookup(hash_table_t *h, const char *key, void **val)
Look up a key in a hash table and optionally return the associated value.
Definition: hash_table.c:302
#define ckd_calloc_2d(d1, d2, sz)
Macro for ckd_calloc_2d
Definition: ckd_alloc.h:270
#define ckd_calloc(n, sz)
Macros to simplify the use of above functions.
Definition: ckd_alloc.h:248
SPHINXBASE_EXPORT ngram_model_t * ngram_model_set_lookup(ngram_model_t *set, const char *name)
Look up a language model by name from a set.
#define E_ERROR(...)
Print error message to error log.
Definition: err.h:104
SPHINXBASE_EXPORT int32 ngram_unknown_wid(ngram_model_t *model)
Get the unknown word ID for a language model.
Definition: ngram_model.c:550
#define hash_table_enter_int32(h, k, v)
Add a 32-bit integer value to a hash table.
Definition: hash_table.h:228
hash_table_t * wid
Mapping of unigram names to word IDs.
char ** word_str
Unigram names.
SPHINXBASE_EXPORT ngram_model_set_iter_t * ngram_model_set_iter_next(ngram_model_set_iter_t *itor)
Move to the next language model in a set.
Sphinx's memory allocation/deallocation routines.
int32 * lweights
Log interpolation weights.
SPHINXBASE_EXPORT int32 ngram_wid(ngram_model_t *model, const char *word)
Look up numerical word ID.
Definition: ngram_model.c:585
SPHINXBASE_EXPORT glist_t hash_table_tolist(hash_table_t *h, int32 *count)
Build a glist of valid hash_entry_t pointers from the given hash table.
Definition: hash_table.c:616
#define NGRAM_INVALID_WID
Impossible word ID.
Definition: ngram_model.h:83
File names related operation.
SPHINXBASE_EXPORT ngram_model_t * ngram_model_set_read(cmd_ln_t *config, const char *lmctlfile, logmath_t *lmath)
Read a set of language models from a control file.
int32 ** widmap
Word ID mapping for submodels.
SPHINXBASE_EXPORT int logmath_log(logmath_t *lmath, float64 p)
Convert linear floating point number to integer log in base B.
Definition: logmath.c:447
A node in a generic list.
Definition: glist.h:100
Subclass of ngram_model for grouping language models.
uint8 writable
Are word strings writable?
SPHINXBASE_EXPORT int ngram_model_free(ngram_model_t *model)
Release memory associated with an N-Gram model.
Definition: ngram_model.c:263
#define ckd_salloc(ptr)
Macro for ckd_salloc
Definition: ckd_alloc.h:264
int32 n_models
Number of models in this set.
SPHINXBASE_EXPORT hash_table_t * hash_table_new(int32 size, int32 casearg)
Allocate a new hash table for a given expected size.
Definition: hash_table.c:158
SPHINXBASE_EXPORT void hash_table_empty(hash_table_t *h)
Delete all entries from a hash_table.
Definition: hash_table.c:483
SPHINXBASE_EXPORT int32 ngram_model_set_count(ngram_model_t *set)
Returns the number of language models in a set.
SPHINXBASE_EXPORT void ckd_free(void *ptr)
Test and free a 1-D array.
Definition: ckd_alloc.c:244
SPHINXBASE_EXPORT glist_t glist_add_ptr(glist_t g, void *ptr)
Create and prepend a new list node, with the given user-defined data, at the HEAD of the given generi...
Definition: glist.c:74
int32 * maphist
Word ID mapping for N-Gram history.
int32 log_zero
Zero probability, cached here for quick lookup.
SPHINXBASE_EXPORT void hash_table_free(hash_table_t *h)
Free the specified hash table; the caller is responsible for freeing the key strings pointed to by th...
Definition: hash_table.c:688
SPHINXBASE_EXPORT int ngram_model_apply_weights(ngram_model_t *model, float32 lw, float32 wip)
Apply a language weight, insertion penalty, and unigram weight to a language model.
Definition: ngram_model.c:360
A note by ARCHAN at 20050510: Technically what we use is the so-called "hash table with buckets", which is...
Definition: hash_table.h:149
int32 n_1g_alloc
Number of allocated word strings (for new word addition)
SPHINXBASE_EXPORT ngram_model_t * ngram_model_set_add(ngram_model_t *set, ngram_model_t *model, const char *name, float32 weight, int reuse_widmap)
Add a language model to a set.
SPHINXBASE_EXPORT glist_t glist_reverse(glist_t g)
Reverse the order of the given glist.
Definition: glist.c:169
Iterator over a model set.
SPHINXBASE_EXPORT int logmath_get_shift(logmath_t *lmath)
Get the shift of the values in a log table.
Definition: logmath.c:386
SPHINXBASE_EXPORT ngram_model_t * ngram_model_set_remove(ngram_model_t *set, const char *name, int reuse_widmap)
Remove a language model from a set.
ngram_model_t base
Base ngram_model_t structure.
SPHINXBASE_EXPORT void glist_free(glist_t g)
Free the given generic list; user-defined data contained within is not automatically freed...
Definition: glist.c:133
int32 cur
Currently selected model, or -1 for none.
SPHINXBASE_EXPORT float64 logmath_get_base(logmath_t *lmath)
Get the log base.
Definition: logmath.c:368
SPHINXBASE_EXPORT ngram_model_t * ngram_model_retain(ngram_model_t *model)
Retain ownership of an N-Gram model.
Definition: ngram_model.c:249
SPHINXBASE_EXPORT int path_is_absolute(const char *file)
Test whether a pathname is absolute for the current OS.
Definition: filename.c:105
#define gnode_ptr(g)
Head of a list of gnodes.
Definition: glist.h:109
SPHINXBASE_EXPORT void ngram_model_set_map_words(ngram_model_t *set, const char **words, int32 n_words)
Set the word-to-ID mapping for this model set.
SPHINXBASE_EXPORT int32 ngram_model_add_word(ngram_model_t *model, const char *word, float32 weight)
Add a word (unigram) to the language model.
Definition: ngram_model.c:649
uint8 n
This is an n-gram model (1, 2, 3, ...).
Implementation of logging routines.
SPHINXBASE_EXPORT ngram_model_t * ngram_model_set_iter_model(ngram_model_set_iter_t *itor, char const **lmname)
Get language model and associated name from an iterator.
logmath_t * lmath
Log-math object.
ngram_model_t ** lms
Language models in this set.
SPHINXBASE_EXPORT int32 ngram_model_add_class(ngram_model_t *model, const char *classname, float32 classweight, char **words, const float32 *weights, int32 n_words)
Add a new class to a language model.
Definition: ngram_model.c:831
One class definition from a classdef file.
SPHINXBASE_EXPORT ngram_model_t * ngram_model_set_interp(ngram_model_t *set, const char **names, const float32 *weights)
Set interpolation weights for a set and enables interpolation.
SPHINXBASE_EXPORT ngram_model_set_iter_t * ngram_model_set_iter(ngram_model_t *set)
Begin iterating over language models in a set.
Opaque structure used to hold the results of command-line parsing.
SPHINXBASE_EXPORT char * string_join(const char *base,...)
Concatenate a NULL-terminated argument list of strings, returning a newly allocated string...
Definition: strfuncs.c:70
Implementation-specific functions for operating on ngram_model_t objects.
SPHINXBASE_EXPORT void ngram_model_set_iter_free(ngram_model_set_iter_t *itor)
Finish iteration over a language model set.
SPHINXBASE_EXPORT void ckd_free_2d(void *ptr)
Free a 2-D array (ptr) previously allocated by ckd_calloc_2d.
Definition: ckd_alloc.c:255
SPHINXBASE_EXPORT const char * ngram_model_set_current(ngram_model_t *set)
Get the current language model name, if any.
Common implementation of ngram_model_t.
void * val
Key-length; the key string does not have to be a C-style NULL terminated string; it can have arbitrar...
Definition: hash_table.h:155
SPHINXBASE_EXPORT ngram_model_t * ngram_model_set_select(ngram_model_t *set, const char *name)
Select a single language model from a set for scoring.
SPHINXBASE_EXPORT int32 ngram_model_set_current_wid(ngram_model_t *set, int32 set_wid)
Query the word-ID mapping for the current language model.
SPHINXBASE_EXPORT float64 logmath_exp(logmath_t *lmath, int logb_p)
Convert integer log in base B to linear floating point.
Definition: logmath.c:456
#define ckd_realloc(ptr, sz)
Macro for ckd_realloc
Definition: ckd_alloc.h:258
Set of language models.
SPHINXBASE_EXPORT int32 ngram_model_set_known_wid(ngram_model_t *set, int32 set_wid)
Test whether a word ID corresponds to a known word in the current state of the language model set...
SPHINXBASE_EXPORT int32 glist_count(glist_t g)
Count the number of element in a given link list.
Definition: glist.c:145
Determine file type automatically.
Definition: ngram_model.h:78
SPHINXBASE_EXPORT int32 ngram_ng_score(ngram_model_t *model, int32 wid, int32 *history, int32 n_hist, int32 *n_used)
Quick general N-Gram score lookup.
Definition: ngram_model.c:375
SPHINXBASE_EXPORT int logmath_add(logmath_t *lmath, int logb_p, int logb_q)
Add two values in log space (i.e.
Definition: logmath.c:392
int32 n_words
Number of actual word strings (NOT the same as the number of unigrams, due to class words)...