adapted README documentation; minor improvements
n0y committed Dec 3, 2021
1 parent 157c17d commit 2afd6cb
Showing 3 changed files with 29 additions and 30 deletions.
18 changes: 9 additions & 9 deletions README.md
@@ -18,26 +18,26 @@ There are two options for running the Mediatheken DLNA Bridge: local installation, or
* Download latest tar.gz from the [RELEASES] page.
* unpack it
* run the provided JAR file with `java -jar mediatheken-dlna-bridge.jar`.
- * you may give a java parameter `-Xmx700m` or so for a memory limit. The memory will be used for a
-   database cache. Giving it more than 1GB doesn't improve things.
+ * you may give a java parameter `-Xmx256m` or so for a memory limit. The memory will be used for a lucene query cache.
+   Giving it more than 512MB doesn't improve things.
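The query cache referred to here is Lucene's `LRUQueryCache`, which the `ClipRepository` code further down installs as the default searcher cache. A minimal sketch of how such a cache is sized from a byte budget; the 1000-query count mirrors the value used in the code below, while the 64 MB figure is an illustrative assumption, not a project setting:

```java
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.LRUQueryCache;

public class QueryCacheSetup {
    public static void main(String[] args) {
        // Cache at most 1000 queries, bounded by a byte budget carved out of the heap
        // granted via -Xmx (64 MB here is an assumed, illustrative value).
        long cacheBytes = 64L * 1024 * 1024;
        IndexSearcher.setDefaultQueryCache(new LRUQueryCache(1000, cacheBytes));
    }
}
```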

You may override any setting in three ways:

* Create a file `config/application.properties`, and put the settings there, or
* Add the settings to your `java` command line with `-DSETTING=VALUE`, or
* Create environment variables `SETTING=VALUE`
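A minimal sketch of how such a three-layer override might resolve a setting. Illustrative only: the precedence shown (environment variable over system property over properties file) and the class name are assumptions, not taken from the project's code.

```java
import java.io.FileInputStream;
import java.io.IOException;
import java.util.Properties;

final class SettingsSketch {
    private final Properties fileProps = new Properties();

    SettingsSketch() throws IOException {
        // config/application.properties, as described above
        try (var in = new FileInputStream("config/application.properties")) {
            fileProps.load(in);
        }
    }

    String get(String key, String defaultValue) {
        String env = System.getenv(key);            // environment variable SETTING=VALUE
        if (env != null) return env;
        String sys = System.getProperty(key);       // -DSETTING=VALUE on the java command line
        if (sys != null) return sys;
        return fileProps.getProperty(key, defaultValue); // value from the properties file, else default
    }
}
```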

-A local database will be created in a directory local to your working directory. It's a H2 database, with
-username/password of `sa`/`sa` (as usual with h2). Feel free to dig into it.
+A local index directory will be created in a directory local to your working directory. It's a Lucene index. Feel free
+to dig into it.
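If you do want to dig into the index, a short sketch using Lucene's reader API; the `./data/clipdb` path is the _DATABASE_LOCATION_ default listed below, so adjust it to wherever your index lives:

```java
import java.nio.file.Paths;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.store.NIOFSDirectory;

public class InspectIndex {
    public static void main(String[] args) throws Exception {
        // Open the index read-only and print a few basic statistics.
        try (var dir = new NIOFSDirectory(Paths.get("./data/clipdb"));
             var reader = DirectoryReader.open(dir)) {
            System.out.println("documents: " + reader.numDocs());
            System.out.println("segments:  " + reader.leaves().size());
        }
    }
}
```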

### Docker installation

Find the released Docker images at [DOCKERHUB]. Simply run them with:

`docker run corelogicsde/mediatheken-dlna-bridge:latest`

-You may set a memory limit with adding `--memory=500M` to the docker line. The memory will be used for a
-database cache. Giving it more than 500 MB doesn't improve things.
+You may set a memory limit by adding `--memory=500M` to the docker command. The memory will be used for a Lucene query
+cache. 128 MB seems to work fine, and giving it more than 500 MB doesn't improve things.

You may override any setting in two ways:

@@ -60,8 +60,8 @@ services:
deploy:
resources:
limits:
-memory: 400M
-# This volume can be used to store the database outside of the container
+memory: 256M
+# This volume can be used to store the lucene index outside of the container
#volumes:
# - source: ./data/mediathek-data
# target: /app/data
@@ -81,7 +81,7 @@ services:

Common configuration

-* _DATABASE_LOCATION_ points to the directory and name of the H2 database. Defaults to `./data/clipdb`
+* _DATABASE_LOCATION_ points to the Lucene index directory. Defaults to `./data/clipdb`
* _UPDATEINTERVAL_FULL_HOURS_ number of hours between full db updates. Defaults to `24`.
* _DISPLAY_NAME_ under this name, the Mediatheken-DLNA-Bridge will be visible in your network. Defaults to `Mediatheken`.
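As a rough illustration of what an interval setting like _UPDATEINTERVAL_FULL_HOURS_ typically drives, a scheduler sketch; the `runFullImport` method and the wiring are assumptions for illustration, not the project's actual code:

```java
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class FullImportScheduler {
    public static void main(String[] args) {
        // Read the interval via -DUPDATEINTERVAL_FULL_HOURS=..., falling back to the documented default of 24.
        long intervalHours = Long.parseLong(System.getProperty("UPDATEINTERVAL_FULL_HOURS", "24"));
        var scheduler = Executors.newSingleThreadScheduledExecutor();
        // Re-run the full import every intervalHours hours, starting immediately.
        scheduler.scheduleAtFixedRate(FullImportScheduler::runFullImport, 0, intervalHours, TimeUnit.HOURS);
    }

    private static void runFullImport() {
        // placeholder for the actual import work
        System.out.println("running full import");
    }
}
```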

9 changes: 5 additions & 4 deletions pom.xml
@@ -33,11 +33,12 @@
<version>E-SNAPSHOT</version>

<properties>
-<maven.compiler.target>17</maven.compiler.target>
-<maven.compiler.source>17</maven.compiler.source>
+<maven.compiler.target>11</maven.compiler.target>
+<maven.compiler.source>11</maven.compiler.source>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>
<cling.version>2.1.2</cling.version>
+<lucene.version>8.11.0</lucene.version>
<log4j.version>2.14.1</log4j.version>
<junit.version>5.8.1</junit.version>
<mockito.version>4.1.0</mockito.version>
@@ -187,12 +188,12 @@
<dependency>
<groupId>org.apache.lucene</groupId>
<artifactId>lucene-core</artifactId>
-<version>8.11.0</version>
+<version>${lucene.version}</version>
</dependency>
<dependency>
<groupId>org.apache.lucene</groupId>
<artifactId>lucene-facet</artifactId>
-<version>8.11.0</version>
+<version>${lucene.version}</version>
</dependency>
<dependency>
<groupId>com.sparkjava</groupId>
32 changes: 15 additions & 17 deletions ClipRepository.java
@@ -132,6 +132,7 @@ interface SearchFunction<T> {
}

private static final Logger LOGGER = LogManager.getLogger(ClipRepository.class);
+private static final String DOCID_LAST_UPDATED = "last-update-stat";
private static final FieldType TYPE_NO_TOKENIZE = new FieldType();

static {
@@ -189,17 +190,14 @@ void openConnection(String indexPath, long cacheSize) {
this.index = new NIOFSDirectory(new File(indexPath).toPath());
}

-StandardAnalyzer analyzer = new StandardAnalyzer();
-IndexWriterConfig indexWriterConfig = new IndexWriterConfig(analyzer);
-IndexWriter writter = new IndexWriter(this.index, indexWriterConfig);

// need at least one entry for a reader to work on...
+var writer = new IndexWriter(this.index, new IndexWriterConfig(new StandardAnalyzer()));
var document = new Document();
-document.add(new NumericDocValuesField("cont", 0));
-writter.addDocument(document);
-writter.close();
+document.add(new Field(ClipField.ID.term(), ClipField.ID.term("placeholder"), TYPE_NO_TOKENIZE));
+writer.updateDocument(new Term(ClipField.ID.term(), ClipField.ID.term("placeholder")), document);
+writer.close();

IndexSearcher.setDefaultQueryCache(new LRUQueryCache(1000, cacheSize));

this.searcherManager = new SearcherManager(this.index, null);
} catch (final IOException e) {
throw new IllegalStateException("Could not initialize FS directory on '" + indexPath + "'.", e);
@@ -226,7 +224,7 @@ private <T> T withSearcher(SearchFunction<T> function) {
public Optional<ZonedDateTime> findLastFullImport() {
LOGGER.debug("finding last full import");
return withSearcher(searcher -> {
-var result = searcher.search(new TermQuery(new Term(ClipField.ID.term(), ClipField.ID.term("last-update-stat"))), 1);
+var result = searcher.search(new TermQuery(new Term(ClipField.ID.term(), ClipField.ID.term(DOCID_LAST_UPDATED))), 1);
if (result.scoreDocs.length > 0) {
var doc = searcher.getIndexReader().document(result.scoreDocs[0].doc);
return Optional.of(ZonedDateTime.parse(doc.get(ClipField.IMPORTEDAT.value())));
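The `withSearcher` helper used here wraps the `SearcherManager` created in `openConnection` above; its body is not part of this diff, but it presumably follows Lucene's standard acquire/release pattern. A minimal sketch under that assumption (the `countAll` method is illustrative only):

```java
import java.io.IOException;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.SearcherManager;

public class SearcherPattern {
    static long countAll(SearcherManager searcherManager) throws IOException {
        IndexSearcher searcher = searcherManager.acquire(); // always pair acquire() with release()
        try {
            // run queries against the acquired searcher
            return searcher.search(new MatchAllDocsQuery(), 1).totalHits.value;
        } finally {
            searcherManager.release(searcher);
        }
    }
}
```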
@@ -242,10 +240,10 @@ public synchronized void updateLastFullImport(ZonedDateTime dateTime) {
var indexWriterConfig = new IndexWriterConfig(analyzer);
try (var writer = new IndexWriter(this.index, indexWriterConfig)) {
var d = new Document();
addToDocument(d, ClipField.ID, "last-update-stat");
addToDocument(d, ClipField.ID, DOCID_LAST_UPDATED);
addDateToDocument(d, ClipField.IMPORTEDAT, dateTime);
writer.updateDocument(
new Term(ClipField.ID.term(), ClipField.ID.term("last-update-stat")),
new Term(ClipField.ID.term(), ClipField.ID.term(DOCID_LAST_UPDATED)),
applyFacets(d));
}
searcherManager.maybeRefreshBlocking();
@@ -260,9 +258,9 @@ public List<String> findAllChannels() {
var query = new MatchAllDocsQuery();
var state = new DefaultSortedSetDocValuesReaderState(searcher.getIndexReader(), ClipField.CHANNELNAME.facet());
var fc = new FacetsCollector();
-FacetsCollector.search(searcher, query, 100, fc);
+FacetsCollector.search(searcher, query, 10000, fc);
var facets = new SortedSetDocValuesFacetCounts(state, fc);
-return Stream.of(facets.getTopChildren(10, ClipField.CHANNELNAME.facet()).labelValues)
+return Stream.of(facets.getTopChildren(10000, ClipField.CHANNELNAME.facet()).labelValues)
.map(l -> l.label)
.collect(Collectors.toList());
});
@@ -277,9 +275,9 @@ public Map<String, Integer> findAllContainedIns(String channelName) {
var query = new TermQuery(new Term(ClipField.CHANNELNAME.termLower(), ClipField.CHANNELNAME.termLower(channelName)));
var state = new DefaultSortedSetDocValuesReaderState(searcher.getIndexReader(), ClipField.CONTAINEDIN.facet());
var fc = new FacetsCollector();
-FacetsCollector.search(searcher, query, 100, fc);
+FacetsCollector.search(searcher, query, 10000, fc);
var facets = new SortedSetDocValuesFacetCounts(state, fc);
-return Stream.of(facets.getTopChildren(10, ClipField.CONTAINEDIN.facet()).labelValues)
+return Stream.of(facets.getTopChildren(10000, ClipField.CONTAINEDIN.facet()).labelValues)
.map(l -> Map.entry(l.label, l.value.intValue()))
.collect(toMap(Map.Entry::getKey, Map.Entry::getValue));

@@ -298,9 +296,9 @@ public Map<String, Integer> findAllContainedIns(String channelName, String start
.build();
var state = new DefaultSortedSetDocValuesReaderState(searcher.getIndexReader(), ClipField.CONTAINEDIN.facet());
var fc = new FacetsCollector();
-FacetsCollector.search(searcher, query, 100, fc);
+FacetsCollector.search(searcher, query, 10000, fc);
var facets = new SortedSetDocValuesFacetCounts(state, fc);
-return Stream.of(facets.getTopChildren(10, ClipField.CONTAINEDIN.facet()).labelValues)
+return Stream.of(facets.getTopChildren(10000, ClipField.CONTAINEDIN.facet()).labelValues)
.map(l -> Map.entry(l.label, l.value.intValue()))
.collect(toMap(Map.Entry::getKey, Map.Entry::getValue));
});
