Unix tools introduced. Today: FHS

The Filesystem Hierarchy Standard (FHS) defines a standard layout for organizing application and OS-related data in a predictable, common way [1].

A basic knowledge of the FHS will help you find application or OS-related data more easily. If you are a developer, it also provides good guidance for organizing your own applications in a maintainable way, e.g. as an Ubuntu package.
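To get a first impression on your own machine, here is a minimal Java sketch (class name made up; assuming a Unix-like system) that prints the top-level directories, which should largely match the list below:

import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

public class ListRootDirs {
    public static void main(String[] args) throws Exception {
        // Print the top-level entries of the filesystem hierarchy.
        try (DirectoryStream<Path> dirs = Files.newDirectoryStream(Paths.get("/"))) {
            for (Path dir : dirs) {
                System.out.println(dir);
            }
        }
    }
}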

/bin – essential user commands

/boot – OS boot loader

/dev – devices (everything is a file principle)

/etc – system configuration

/home – user data

/lib – essential shared libraries

/media – mount point for removable media

/mnt – mount point for temporarily mounted filesystems

/opt – add-on applications

/root – home of root

/run – run-time variable data

/sbin – system binaries

/srv – data for services provided by the system

/tmp – temporary data

/proc – virtual filesystem providing process and kernel information

/usr – secondary hierarchy

bin – Most user commands
lib – Libraries
local – Local hierarchy (empty after main installation)
sbin – Non-vital system binaries
share – Architecture-independent data

/var – variable data

cache – Application cache data
lib – Variable state information
local – Variable data for /usr/local
lock – Lock files
log – Log files and directories
opt – Variable data for /opt
run – Data relevant to running processes
spool – Application spool data
tmp – Temporary files preserved between system reboots

Find more

What about /init.d?

What does the .d stand for in directory names?

FHS in Debian


Converting XML to JSON

You can use standard tools for this:

1. Use the tool xjc from your JDK to generate Java classes from the schema

Since Java 9 you must explicitly add JAXB as a module with --add-modules java.se.ee. See: How to resolve java.lang.NoClassDefFoundError: javax/xml/bind/JAXBException in Java 9

Since Java 11, JAXB is no longer part of the JDK, so you have to download xjc in an extra step from https://javaee.github.io/jaxb-v2/

2. Read in as XML, write out as JSON using Jackson


Walkthrough with https://schema.datacite.org/meta/kernel-4.1/metadata.xsd

1. Use the tool xjc from your JDK

In this example I will use the fairly complex DataCite schema.

/path/to/jdk/bin/xjc -d /path/to/java/project \
    -p stack24174963.datacite \
    https://schema.datacite.org/meta/kernel-4.1/metadata.xsd

The tool will reply with:

parsing a schema...
compiling a schema...

If you look into Resource.Creator and Resource.Contributor you will see that the member variables givenName and familyName are not correctly typed. Change their type from Object to String, and apply the same change to the corresponding getter and setter methods!
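For illustration, the corrected members would look roughly like this (a sketch only; the exact generated code depends on your xjc version):

// In the generated Resource.Creator (and analogously in Resource.Contributor):
protected String givenName;   // was: protected Object givenName
protected String familyName;  // was: protected Object familyName

public String getGivenName() {             // was: public Object getGivenName()
    return givenName;
}

public void setGivenName(String value) {   // was: public void setGivenName(Object value)
    this.givenName = value;
}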

2. Read in as XML, write out as JSON using Jackson

import java.io.InputStream;
import java.io.StringWriter;

import javax.xml.bind.JAXB;

import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.SerializationFeature;

import stack24174963.datacite.Resource;

public class HowToXmlToJsonWithSchema {

    public void readXmlAndConvertToSchema() throws Exception {
        String example = "schemas/datacite/kernel-4.1/example/datacite-example-complicated-v4.1.xml";
        try (InputStream in = Thread.currentThread().getContextClassLoader().getResourceAsStream(example)) {
            // Unmarshal the XML into the generated schema classes...
            Resource resource = JAXB.unmarshal(in, Resource.class);
            // ...and print the object as JSON.
            System.out.println(asJson(resource));
        }
    }

    private String asJson(Object obj) throws Exception {
        StringWriter w = new StringWriter();
        new ObjectMapper().configure(SerializationFeature.INDENT_OUTPUT, true).writeValue(w, obj);
        return w.toString();
    }
}

This prints:


  "identifier" : {
    "value" : "10.5072/testpub",
    "identifierType" : "DOI"
  "creators" : {
    "creator" : [ {
      "creatorName" : {
        "value" : "Smith, John",
        "nameType" : "PERSONAL"
      "givenName" : "John",
      "familyName" : "Smith",
      "nameIdentifier" : [ ],
      "affiliation" : [ ]
    }, {
      "creatorName" : {
        "value" : "つまらないものですが",
        "nameType" : null
      "givenName" : null,
      "familyName" : null,
      "nameIdentifier" : [ {
        "value" : "0000000134596520",
        "nameIdentifierScheme" : "ISNI",
        "schemeURI" : "http://isni.org/isni/"
      } ],
      "affiliation" : [ ]
    } ]
  "titles" : {
    "title" : [ {
      "value" : "Właściwości rzutowań podprzestrzeniowych",
      "titleType" : null,
      "lang" : "pl"
    }, {
      "value" : "Translation of Polish titles",
      "titleType" : "TRANSLATED_TITLE",
      "lang" : "en"
    } ]
  "publisher" : "Springer",
  "publicationYear" : "2010",
  "resourceType" : {
    "value" : "Monograph",
    "resourceTypeGeneral" : "TEXT"
  "subjects" : {
    "subject" : [ {
      "value" : "830 German & related literatures",
      "subjectScheme" : "DDC",
      "schemeURI" : null,
      "valueURI" : null,
      "lang" : "en"
    }, {
      "value" : "Polish Literature",
      "subjectScheme" : null,
      "schemeURI" : null,
      "valueURI" : null,
      "lang" : "en"
    } ]
  "contributors" : {
    "contributor" : [ {
      "contributorName" : {
        "value" : "Doe, John",
        "nameType" : "PERSONAL"
      "givenName" : "John",
      "familyName" : "Doe",
      "nameIdentifier" : [ {
        "value" : "0000-0001-5393-1421",
        "nameIdentifierScheme" : "ORCID",
        "schemeURI" : "http://orcid.org/"
      } ],
      "affiliation" : [ ],
      "contributorType" : "DATA_COLLECTOR"
    } ]
  "dates" : null,
  "language" : "de",
  "alternateIdentifiers" : {
    "alternateIdentifier" : [ {
      "value" : "937-0-4523-12357-6",
      "alternateIdentifierType" : "ISBN"
    } ]
  "relatedIdentifiers" : {
    "relatedIdentifier" : [ {
      "value" : "10.5272/oldertestpub",
      "resourceTypeGeneral" : null,
      "relatedIdentifierType" : "DOI",
      "relationType" : "IS_PART_OF",
      "relatedMetadataScheme" : null,
      "schemeURI" : null,
      "schemeType" : null
    } ]
  "sizes" : {
    "size" : [ "256 pages" ]
  "formats" : {
    "format" : [ "pdf" ]
  "version" : "2",
  "rightsList" : {
    "rights" : [ {
      "value" : "Creative Commons Attribution-NoDerivs 2.0 Generic",
      "rightsURI" : "http://creativecommons.org/licenses/by-nd/2.0/",
      "lang" : null
    } ]
  "descriptions" : {
    "description" : [ {
      "content" : [ "\n      Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea\n      takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores\n      et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet.\n    " ],
      "descriptionType" : "ABSTRACT",
      "lang" : "la"
    } ]
  "geoLocations" : null,
  "fundingReferences" : null

For XML input:

<?xml version="1.0" encoding="UTF-8"?>
<resource xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://datacite.org/schema/kernel-4" xsi:schemaLocation="http://datacite.org/schema/kernel-4 http://schema.datacite.org/meta/kernel-4.1/metadata.xsd">
  <identifier identifierType="DOI">10.5072/testpub</identifier>
  <creators>
    <creator>
      <creatorName nameType="Personal">Smith, John</creatorName>
      <givenName>John</givenName>
      <familyName>Smith</familyName>
    </creator>
    <creator>
      <creatorName>つまらないものですが</creatorName>
      <nameIdentifier nameIdentifierScheme="ISNI" schemeURI="http://isni.org/isni/">0000000134596520</nameIdentifier>
    </creator>
  </creators>
  <titles>
    <title xml:lang="pl">Właściwości rzutowań podprzestrzeniowych</title>
    <title xml:lang="en" titleType="TranslatedTitle">Translation of Polish titles</title>
  </titles>
  <publisher>Springer</publisher>
  <publicationYear>2010</publicationYear>
  <subjects>
    <subject xml:lang="en" subjectScheme="DDC">830 German &amp; related literatures</subject>
    <subject xml:lang="en">Polish Literature</subject>
  </subjects>
  <contributors>
    <contributor contributorType="DataCollector">
      <contributorName nameType="Personal">Doe, John</contributorName>
      <nameIdentifier nameIdentifierScheme="ORCID" schemeURI="http://orcid.org/">0000-0001-5393-1421</nameIdentifier>
    </contributor>
  </contributors>
  <language>de</language>
  <resourceType resourceTypeGeneral="Text">Monograph</resourceType>
  <alternateIdentifiers>
    <alternateIdentifier alternateIdentifierType="ISBN">937-0-4523-12357-6</alternateIdentifier>
  </alternateIdentifiers>
  <relatedIdentifiers>
    <relatedIdentifier relatedIdentifierType="DOI" relationType="IsPartOf">10.5272/oldertestpub</relatedIdentifier>
  </relatedIdentifiers>
  <sizes>
    <size>256 pages</size>
  </sizes>
  <formats>
    <format>pdf</format>
  </formats>
  <version>2</version>
  <rightsList>
    <rights rightsURI="http://creativecommons.org/licenses/by-nd/2.0/">Creative Commons Attribution-NoDerivs 2.0 Generic</rights>
  </rightsList>
  <descriptions>
    <description xml:lang="la" descriptionType="Abstract">
      Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea
      takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores
      et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet.
    </description>
  </descriptions>
</resource>



Process parts of XML with XPath in Java

This shows you how to

1. Read in an XML file to a DOM
2. Filter out a set of Nodes with XPath
3. Perform a certain action on each of the extracted Nodes.

We will call the code with the following statement:

processFilteredXml(xmlIn, xpathExpr, (node) -> { /* Do something... */ });

In our case we want to print some creatorName elements from a book.xml, using "//book/creators/creator/creatorName" as the XPath expression and performing a printNode action on each Node that matches.

Full code

import java.io.InputStream;
import java.io.PrintStream;
import java.io.StringWriter;
import java.util.function.Consumer;

import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.transform.OutputKeys;
import javax.xml.transform.Transformer;
import javax.xml.transform.TransformerFactory;
import javax.xml.transform.dom.DOMSource;
import javax.xml.transform.stream.StreamResult;
import javax.xml.xpath.XPath;
import javax.xml.xpath.XPathConstants;
import javax.xml.xpath.XPathExpression;
import javax.xml.xpath.XPathFactory;

import org.w3c.dom.Document;
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;

public void printXml() {
    try (InputStream in = readFile("book.xml")) {
        processFilteredXml(in, "//book/creators/creator/creatorName", (node) -> {
            printNode(node, System.out);
        });
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}

private InputStream readFile(String yourSampleFile) {
    return Thread.currentThread().getContextClassLoader().getResourceAsStream(yourSampleFile);
}

private void processFilteredXml(InputStream in, String xpath, Consumer<Node> process) {
    Document doc = readXml(in);
    NodeList list = filterNodesByXPath(doc, xpath);
    for (int i = 0; i < list.getLength(); i++) {
        Node node = list.item(i);
        // Apply the passed-in action to each matching node.
        process.accept(node);
    }
}

public Document readXml(InputStream xmlin) {
    try {
        DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
        DocumentBuilder db = dbf.newDocumentBuilder();
        return db.parse(xmlin);
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}

private NodeList filterNodesByXPath(Document doc, String xpathExpr) {
    try {
        XPathFactory xPathFactory = XPathFactory.newInstance();
        XPath xpath = xPathFactory.newXPath();
        XPathExpression expr = xpath.compile(xpathExpr);
        Object eval = expr.evaluate(doc, XPathConstants.NODESET);
        return (NodeList) eval;
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}

private void printNode(Node node, PrintStream out) {
    try {
        Transformer transformer = TransformerFactory.newInstance().newTransformer();
        transformer.setOutputProperty(OutputKeys.INDENT, "yes");
        transformer.setOutputProperty(OutputKeys.OMIT_XML_DECLARATION, "yes");
        transformer.setOutputProperty("{http://xml.apache.org/xslt}indent-amount", "2");
        StreamResult result = new StreamResult(new StringWriter());
        DOMSource source = new DOMSource(node);
        transformer.transform(source, result);
        String xmlString = result.getWriter().toString();
        out.println(xmlString);
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}

This prints:


<creatorName>Fosmire, Michael</creatorName>

<creatorName>Wertz, Ruth</creatorName>

<creatorName>Purzer, Senay</creatorName>

For book.xml:

<book>
  <creators>
    <creator>
      <creatorName>Fosmire, Michael</creatorName>
    </creator>
    <creator>
      <creatorName>Wertz, Ruth</creatorName>
    </creator>
    <creator>
      <creatorName>Purzer, Senay</creatorName>
    </creator>
  </creators>
  <title>Critical Engineering Literacy Test (CELT)</title>
</book>

Directly Accessing XML with Java


Processing huge XML files can become cumbersome if your hardware is limited.

“Parsing a sample 20 MB XML document[1] containing Wikipedia document abstracts into a DOM tree using the Xerces library roughly consumes about 100 MB of RAM. Other document model implementations[2] such as Saxon’s TinyTree are more memory efficient; parsing the same document in Saxon consumes about 50 MB of memory. These numbers will vary with document contents, but generally the required memory scales linearly with document size, and is typically a single-digit multiple of the file size on disk.”

Probst, Martin. “Processing Arbitrarily Large XML using a Persistent DOM.” 2010. https://www.balisage.net/Proceedings/vol5/html/Probst01/BalisageVol5-Probst01.html

A good way to deal with huge files is to split them into smaller ones. But sometimes you don’t have that option.

This is where random access comes into play. While random access to binary files is well supported by standard Java tools, the same is not true for higher-order, text-based formats like XML.
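For binary data, standard Java supports this directly, e.g. via java.io.RandomAccessFile. A minimal sketch (file name made up):

import java.io.RandomAccessFile;

public class BinaryRandomAccess {
    public static void main(String[] args) throws Exception {
        try (RandomAccessFile raf = new RandomAccessFile("/tmp/data.bin", "r")) {
            raf.seek(1024);                // jump to byte offset 1024
            byte[] buffer = new byte[16];
            raf.readFully(buffer);         // read 16 bytes from there
        }
    }
}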

The Plan

  1. Find proper access points by taking the XML structure into account.
  2. Translate character offsets to byte offsets, taking the encoding into account (illustrated below).

This sounds straightforward.
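To see why step 2 is needed at all, here is a small illustration (string content made up) of how character offsets and UTF-8 byte offsets diverge:

import java.nio.charset.StandardCharsets;

public class CharVsByteOffset {
    public static void main(String[] args) {
        String s = "aä€x"; // 'a' = 1 byte, 'ä' = 2 bytes, '€' = 3 bytes in UTF-8
        int charOffset = 3; // character position of 'x'
        int byteOffset = s.substring(0, charOffset)
                          .getBytes(StandardCharsets.UTF_8).length;
        System.out.println(charOffset + " chars = " + byteOffset + " bytes"); // 3 chars = 6 bytes
    }
}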

Existing Libraries

The StAX library offers streaming access to XML data without loading a complete DOM model into memory. The library comes with an XMLStreamReader that offers the method getLocation().getCharacterOffset().

But unfortunately this returns only character offsets. In order to access the file with standard Java readers, we need byte offsets. Since UTF-8 encodes characters with a variable number of bytes, computing a byte offset from a character offset would require re-reading the whole file from the beginning. That is not acceptable.
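For reference, reading character offsets with StAX looks roughly like this (a sketch; file and element name assumed):

import java.io.FileInputStream;
import java.io.InputStream;

import javax.xml.stream.XMLInputFactory;
import javax.xml.stream.XMLStreamConstants;
import javax.xml.stream.XMLStreamReader;

public class StaxOffsets {
    public static void main(String[] args) throws Exception {
        XMLInputFactory factory = XMLInputFactory.newInstance();
        try (InputStream in = new FileInputStream("/tmp/pages.xml")) {
            XMLStreamReader reader = factory.createXMLStreamReader(in);
            while (reader.hasNext()) {
                if (reader.next() == XMLStreamConstants.START_ELEMENT
                        && "page".equals(reader.getLocalName())) {
                    // Character offset only -- not the byte offset we would need.
                    System.out.println(reader.getLocation().getCharacterOffset());
                }
            }
        }
    }
}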


In the following I will introduce a solution based on a generated XML parser using ANTLR4.

  1. We will use the parser to walk through the XML file. While the parser is doing its work, it will emit offsets whenever a certain criterion is fulfilled (in the example we search for XML elements with the name 'page').
  2. I will use these offsets to access the XML file and to read portions of XML into a Java bean using Jackson.

The following works very well on a ~17 GB Wikipedia dump (dewiki-20170501-pages-articles-multistream.xml.bz2). I still had to increase the heap size using -Xmx6g, but compared to a DOM approach this looks much more acceptable.

1. Get XML Grammar

cd /tmp
git clone https://github.com/antlr/grammars-v4

2. Generate Parser

cd /tmp/grammars-v4/xml/
mvn clean install

3. Copy Generated Java files to your Project

cp -r target/generated-sources/antlr4 /path/to/your/project/gen

4. Hook in with a Listener to collect character offsets

package stack43366566;

import java.util.ArrayList;
import java.util.List;

import org.antlr.v4.runtime.ANTLRFileStream;
import org.antlr.v4.runtime.CommonTokenStream;
import org.antlr.v4.runtime.tree.ParseTreeWalker;

import stack43366566.gen.XMLLexer;
import stack43366566.gen.XMLParser;
import stack43366566.gen.XMLParser.DocumentContext;
import stack43366566.gen.XMLParserBaseListener;

public class FindXmlOffset {

    List<Integer> offsets = null;
    String searchForElement = null;

    public class MyXMLListener extends XMLParserBaseListener {
        public void enterElement(XMLParser.ElementContext ctx) {
            String name = ctx.Name().get(0).getText();
            if (searchForElement.equals(name)) {
                // Store the offset of the element's first token.
                offsets.add(ctx.start.getStartIndex());
            }
        }
    }

    public List<Integer> createOffsets(String file, String elementName) {
        searchForElement = elementName;
        offsets = new ArrayList<>();
        try {
            XMLLexer lexer = new XMLLexer(new ANTLRFileStream(file));
            CommonTokenStream tokens = new CommonTokenStream(lexer);
            XMLParser parser = new XMLParser(tokens);
            DocumentContext ctx = parser.document();
            ParseTreeWalker walker = new ParseTreeWalker();
            MyXMLListener listener = new MyXMLListener();
            walker.walk(listener, ctx);
            return offsets;
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }

    public static void main(String[] arg) {
        System.out.println("Search for offsets.");
        List<Integer> offsets = new FindXmlOffset().createOffsets("/tmp/dewiki-20170501-pages-articles-multistream.xml", "page");
        System.out.println("Offsets: " + offsets);
    }
}


5. Result


Offsets: [2441, 10854, 30257, 51419 ….

6. Read from Offset Position

To test the code, I've written a class that reads each Wikipedia page into a Java object

class Page {
    public Page() {}
    public String title;
}

using basically this code:

private Page readPage(Integer offset, String filename) {
    try (Reader in = new FileReader(filename)) {
        // Skip to the offset collected by FindXmlOffset before parsing.
        in.skip(offset);
        ObjectMapper mapper = new XmlMapper();
        mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
        Page object = mapper.readValue(in, Page.class);
        return object;
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}
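Putting both steps together, a hypothetical invocation (same file and element name as above) could look like this:

String dump = "/tmp/dewiki-20170501-pages-articles-multistream.xml";
// 1. Collect the offset of every page element.
List<Integer> offsets = new FindXmlOffset().createOffsets(dump, "page");
// 2. Access a single page directly, without parsing the whole dump.
Page page = readPage(offsets.get(0), dump);
System.out.println(page.title);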


Find the complete example on GitHub.