/*------------------------------------------------------------------------------
* Copyright (C) 2003-2006 Ben van Klinken and the CLucene Team
*
* Distributable under the terms of either the Apache License (Version 2.0) or
* the GNU Lesser General Public License, as specified in the COPYING file.
------------------------------------------------------------------------------*/
#ifndef _lucene_index_IndexWriter_
#define _lucene_index_IndexWriter_

#if defined(_LUCENE_PRAGMA_ONCE)
# pragma once
#endif

#include "CLucene/analysis/AnalysisHeader.h"
#include "CLucene/util/VoidList.h"
#include "CLucene/search/Similarity.h"
#include "CLucene/store/Lock.h"
#include "CLucene/store/TransactionalRAMDirectory.h"
#include "SegmentHeader.h"

CL_NS_DEF(index)

/**
An IndexWriter creates and maintains an index.

The third argument to the constructor determines whether a new index is created,
or whether an existing index is opened for the addition of new documents.
In either case, documents are added with the addDocument method.
When finished adding documents, close should be called.
If an index will not have more documents added for a while and optimal search performance is desired, then the optimize method should be called before the index is closed.
Opening an IndexWriter creates a lock file for the directory in use. Trying to open
another IndexWriter on the same directory will lead to an IOException. The IOException
is also thrown if an IndexReader on the same directory is used to delete documents
from the index.
@see IndexModifier IndexModifier supports the important methods of IndexWriter plus deletion
*/
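/* A minimal usage sketch of the cycle described above. The analyzer choice,
 * index path, field name and field flags are illustrative assumptions:
 *
 *   CL_NS(analysis)::SimpleAnalyzer an;
 *   IndexWriter writer("/tmp/index", &an, true);  // create a new index at the path
 *   CL_NS(document)::Document doc;
 *   doc.add( *_CLNEW CL_NS(document)::Field(_T("contents"), _T("hello clucene"),
 *       CL_NS(document)::Field::STORE_YES | CL_NS(document)::Field::INDEX_TOKENIZED) );
 *   writer.addDocument(&doc);  // analyze and buffer the document
 *   writer.optimize();         // merge segments for faster searches
 *   writer.close();            // flush changes and release the write lock
 */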
class IndexWriter:LUCENE_BASE {
private:
	class LockWith2; // lock helper based on CL_NS(store)::LuceneLockWith

public:
/** Determines the minimal number of documents required before the buffered
* in-memory documents are merged and a new Segment is created.
* Since Documents are merged in a {@link RAMDirectory},
* a large value gives faster indexing. At the same time, mergeFactor limits
* the number of files open in a FSDirectory.
*
* The default value is DEFAULT_MAX_BUFFERED_DOCS.*/
void setMaxBufferedDocs(int32_t val){ minMergeDocs = val; }
/**
* @see #setMaxBufferedDocs
*/
int32_t getMaxBufferedDocs(){ return minMergeDocs; }
/**
* Default value for the write lock timeout (1,000).
*/
LUCENE_STATIC_CONSTANT(int64_t, WRITE_LOCK_TIMEOUT = 1000);
/**
* Sets the maximum time to wait for a write lock (in milliseconds).
*/
void setWriteLockTimeout(int64_t writeLockTimeout) { this->writeLockTimeout = writeLockTimeout; }
/**
* @see #setWriteLockTimeout
*/
int64_t getWriteLockTimeout() { return writeLockTimeout; }
/**
* Default value for the commit lock timeout (10,000).
*/
LUCENE_STATIC_CONSTANT(int64_t, COMMIT_LOCK_TIMEOUT = 10000);
/**
* Sets the maximum time to wait for a commit lock (in milliseconds).
*/
void setCommitLockTimeout(int64_t commitLockTimeout) { this->commitLockTimeout = commitLockTimeout; }
/**
* @see #setCommitLockTimeout
*/
int64_t getCommitLockTimeout() { return commitLockTimeout; }
static const char* WRITE_LOCK_NAME; //"write.lock";
static const char* COMMIT_LOCK_NAME; //"commit.lock";
/**
* Default value is 10. Change using {@link #setMergeFactor(int)}.
*/
LUCENE_STATIC_CONSTANT(int32_t, DEFAULT_MERGE_FACTOR = 10);
/* Determines how often segment indices are merged by addDocument(). With
* smaller values, less RAM is used while indexing, and searches on
* unoptimized indices are faster, but indexing speed is slower. With larger
* values more RAM is used while indexing and searches on unoptimized indices
* are slower, but indexing is faster. Thus larger values (> 10) are best
* for batched index creation, and smaller values (< 10) for indices that are
* interactively maintained.
*
* This must never be less than 2. The default value is 10.
*/
int32_t getMergeFactor() const{ return mergeFactor; }
void setMergeFactor(int32_t val){ mergeFactor = val; }
/** Expert: The fraction of terms in the "dictionary" which should be stored
* in RAM. Smaller values use more memory, but make searching slightly
* faster, while larger values use less memory and make searching slightly
* slower. Searching is typically not dominated by dictionary lookup, so
* tweaking this is rarely useful.
*/
LUCENE_STATIC_CONSTANT(int32_t, DEFAULT_TERM_INDEX_INTERVAL = 128);
/** Determines the minimal number of documents required before the buffered
* in-memory documents are merged and a new Segment is created.
* Since Documents are merged in a {@link RAMDirectory},
* a large value gives faster indexing. At the same time, mergeFactor limits
* the number of files open in a FSDirectory.
*
* The default value is 10.*/
int32_t getMinMergeDocs() const{ return minMergeDocs; }
void setMinMergeDocs(int32_t val){ minMergeDocs = val; }
/** Determines the largest number of documents ever merged by addDocument().
* Small values (e.g., less than 10,000) are best for interactive indexing,
* as this limits the length of pauses while indexing to a few seconds.
* Larger values are best for batched indexing and speedier searches.
*
* The default value is {@link #DEFAULT_MAX_MERGE_DOCS}.
*/
LUCENE_STATIC_CONSTANT(int32_t, DEFAULT_MAX_MERGE_DOCS = LUCENE_INT32_MAX_SHOULDBE);
/**Determines the largest number of documents ever merged by addDocument().
* Small values (e.g., less than 10,000) are best for interactive indexing,
* as this limits the length of pauses while indexing to a few seconds.
* Larger values are best for batched indexing and speedier searches.
*
* The default value is {@link #DEFAULT_MAX_MERGE_DOCS} (the maximum int32_t value).
*/
int32_t getMaxMergeDocs() const{ return maxMergeDocs; }
void setMaxMergeDocs(int32_t val){ maxMergeDocs = val; }
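/* Illustrative tuning sketch (the values are examples only). Batch builds
 * favour larger buffers and merge factors, while interactively maintained
 * indices favour the smaller defaults so indexing pauses stay short:
 *
 *   writer.setMergeFactor(50);        // merge segments less often; faster bulk indexing
 *   writer.setMaxBufferedDocs(1000);  // buffer more documents in RAM before a new segment
 *   writer.setMaxMergeDocs(100000);   // cap how many documents a merged segment may hold
 */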
/**
* Merges all segments from an array of indexes into this index.
*
* This may be used to parallelize batch indexing. A large document
* collection can be broken into sub-collections. Each sub-collection can be
* indexed in parallel, on a different thread, process or machine. The
* complete index can then be created by merging sub-collection indices
* with this method.
*
* After this completes, the index is optimized.
*@synchronized
*/
void addIndexes(CL_NS(store)::Directory** dirs);
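/* Illustrative sketch of merging per-thread or per-machine sub-indexes.
 * The directory paths are examples, and the NULL terminator for the array is
 * an assumption about the expected calling convention:
 *
 *   CL_NS(store)::Directory* dirs[3];
 *   dirs[0] = CL_NS(store)::FSDirectory::getDirectory("/tmp/index-part0", false);
 *   dirs[1] = CL_NS(store)::FSDirectory::getDirectory("/tmp/index-part1", false);
 *   dirs[2] = NULL;              // terminate the array
 *   writer.addIndexes(dirs);     // merge the sub-indexes and optimize this index
 */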
/** Merges the provided indexes into this index.
* After this completes, the index is optimized.
* The provided IndexReaders are not closed.
*/
void addIndexes(IndexReader** readers);

/** Expert: Return the Similarity implementation used by this IndexWriter.
*
* This defaults to the current value of {@link Similarity#getDefault()}.
*/
CL_NS(search)::Similarity* getSimilarity() { return this->similarity; }
/** Returns the analyzer used by this index. */
CL_NS(analysis)::Analyzer* getAnalyzer() { return analyzer; }
private:
/** Merges all RAM-resident segments. */
void flushRamSegments();
/** Incremental segment merger. */
void maybeMergeSegments();
/** Pops segments off of segmentInfos stack down to minSegment, merges them,
* and pushes the merged index onto the top of the segmentInfos stack.
*/
void mergeSegments(const uint32_t minSegment);
/** Merges the named range of segments, replacing them in the stack with a
* single segment. */
void mergeSegments(const uint32_t minSegment, const uint32_t end);
void deleteFiles(CL_NS(util)::AStringArrayWithDeletor& files);
void readDeleteableFiles(CL_NS(util)::AStringArrayWithDeletor& files);
void writeDeleteableFiles(CL_NS(util)::AStringArrayWithDeletor& files);
/*
* Some operating systems (e.g. Windows) don't permit a file to be deleted
* while it is opened for read (e.g. by another process or thread). So we
* assume that when a delete fails it is because the file is open in another
* process, and queue the file for subsequent deletion.
*/
void deleteSegments(CL_NS(util)::CLVector<SegmentReader*>* segments);

public:
/** Expert: Set the interval between indexed terms. Large values cause less
* memory to be used by IndexReader, but slow random-access to terms. Small
* values cause more memory to be used by an IndexReader, and speed
* random-access to terms.
*
* This parameter determines the amount of computation required per query
* term, regardless of the number of documents that contain that term. In
* particular, it is the maximum number of other terms that must be
* scanned before a term is located and its frequency and position information
* may be processed. In a large index with user-entered query terms, query
* processing time is likely to be dominated not by term lookup but rather
* by the processing of frequency and positional data. In a small index
* or when many uncommon query terms are generated (e.g., by wildcard
* queries) term lookup may become a dominant cost.
*
* In particular, numUniqueTerms/interval terms are read into
* memory by an IndexReader, and, on average, interval/2 terms
* must be scanned for each random term access.
*
* @see #DEFAULT_TERM_INDEX_INTERVAL
*/
void setTermIndexInterval(int32_t interval) { termIndexInterval = interval; }
/** Expert: Return the interval between indexed terms.
*
* @see #setTermIndexInterval(int)
*/
int32_t getTermIndexInterval() { return termIndexInterval; }
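/* Illustrative sketch: a coarser term index before a large batch load.
 * The value 256 is an example only; it trades slower random term access for
 * lower IndexReader memory use (roughly numUniqueTerms/256 terms held in RAM):
 *
 *   writer.setTermIndexInterval(256);
 */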
/**
* Constructs an IndexWriter for the index in <code>path</code>.
* Text will be analyzed with <code>a</code>. If <code>create</code>
* is true, then a new, empty index will be created in
* <code>path</code>, replacing the index already there, if any.
*
* @param path the path to the index directory
* @param a the analyzer to use
* @param create <code>true</code> to create the index or overwrite
*  the existing one; <code>false</code> to append to the existing
*  index
* @throws IOException if the directory cannot be read/written to, or
*  if it does not exist, and <code>create</code> is
*  <code>false</code>
*/
IndexWriter(const char* path, CL_NS(analysis)::Analyzer* a, const bool create, const bool closeDir=true);
/** Constructs an IndexWriter for the index in <code>d</code>.
* Text will be analyzed with <code>a</code>. If <code>create</code> is true, then a new,
* empty index will be created in <code>d</code>, replacing the index already
* there, if any.
*/
IndexWriter(CL_NS(store)::Directory* d, CL_NS(analysis)::Analyzer* a, const bool create, const bool closeDir=false);
/**
* Flushes all changes to an index, closes all associated files, and closes
* the directory that the index is stored in.
*/
void close();
/**Returns the number of documents currently in this index.
*@synchronized
*/
int32_t docCount();
/**
* Adds a document to this index. If a non-NULL analyzer is provided, it is
* used instead of the value of {@link #getAnalyzer()}. If the document
* contains more terms for a given field than the limit set with
* {@link #setMaxFieldLength(int)}, the remainder are discarded.
*/
void addDocument(CL_NS(document)::Document* doc, CL_NS(analysis)::Analyzer* analyzer=NULL);
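/* Illustrative sketch of overriding the analyzer for a single document;
 * WhitespaceAnalyzer and the document names are example choices only:
 *
 *   writer.addDocument(&doc);            // analyzed with getAnalyzer()
 *   CL_NS(analysis)::WhitespaceAnalyzer ws;
 *   writer.addDocument(&otherDoc, &ws);  // analyzed with ws for this call only
 */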
/**Merges all segments together into a single segment, optimizing an index
* for search.
*@synchronized
*/
void optimize();
/**Merges all segments from an array of indices into this index.
*
*