Recoll user manual

Jean-Francois Dockes

   <[1]jfd@recoll.org>

   Copyright © 2005-2025 Jean-Francois Dockes

   Permission is granted to copy, distribute and/or modify this document
   under the terms of the GNU Free Documentation License, Version 1.3 or
   any later version published by the Free Software Foundation; with no
   Invariant Sections, no Front-Cover Texts, and no Back-Cover Texts. A
   copy of the license can be found at the following location: [2]GNU web
   site.

   This document introduces full text search notions and describes the
   installation and use of the Recoll application. This version describes
   Recoll 1.43.1.
     __________________________________________________________________

   Table of Contents

   1. [3]Introduction

        1.1. [4]Giving it a try
        1.2. [5]Full text search
        1.3. [6]Recoll overview

   2. [7]Indexing

        2.1. [8]Introduction

              2.1.1. [9]Indexing modes
              2.1.2. [10]The index configuration directory
              2.1.3. [11]Document types
              2.1.4. [12]Indexing failures
              2.1.5. [13]Recovery

        2.2. [14]Index storage

              2.2.1. [15]Xapian index formats
              2.2.2. [16]Security aspects
              2.2.3. [17]Special considerations for big indexes

        2.3. [18]Index configuration

              2.3.1. [19]The index configuration GUI
              2.3.2. [20]Multiple indexes

                    [21]Creating and using an additional index: Unix-like
                            systems example

                    [22]Creating an alternate index: Windows example

              2.3.3. [23]Index case and diacritics sensitivity

        2.4. [24]Indexing performance and resource usage

              2.4.1. [25]Indexing threads configuration (Unix-like
                      systems)

              2.4.2. [26]Using multiple temporary indexes to improve
                      indexing time (1.41.1)

              2.4.3. [27]Quieting down the indexing process

        2.5. [28]Index update scheduling

              2.5.1. [29]Periodic indexing
              2.5.2. [30]Real time indexing

        2.6. [31]Fields and metadata

              2.6.1. [32]Incorporating external metadata

                    [33]Unix-like systems and MacOS systems: using
                            extended attributes

                    [34]Using a command for importing external metadata

        2.7. [35]Miscellaneous indexing notes

              2.7.1. [36]Indexing punctuation characters (1.39)
              2.7.2. [37]The PDF input handler

                    [38]Extracting PDF outlines and bookmarks
                    [39]XMP fields extraction
                    [40]PDF attachment indexing

              2.7.3. [41]Running OCR on image documents

                    [42]OCR for PDF documents
                    [43]OCR for image documents

              2.7.4. [44]Running a speech to text program on audio files
              2.7.5. [45]Removable volumes
              2.7.6. [46]Unix-like systems: indexing visited Web pages

   3. [47]Searching

        3.1. [48]Introduction
        3.2. [49]Searching with the Qt graphical user interface (GUI)

              3.2.1. [50]Simple search
              3.2.2. [51]The filters panel (1.32)
              3.2.3. [52]The result list
              3.2.4. [53]The result table
              3.2.5. [54]The preview window
              3.2.6. [55]Assisted Complex Search (A.K.A. "Advanced
                      Search")

                    [56]Advanced search: the "find" tab
                    [57]Advanced search: the "filter" tab
                    [58]Advanced search history

              3.2.7. [59]Document history
              3.2.8. [60]Saving and restoring queries
              3.2.9. [61]Sorting search results and collapsing duplicates
              3.2.10. [62]The term explorer tool
              3.2.11. [63]The Query Fragments window
              3.2.12. [64]Searching across multiple indexes
              3.2.13. [65]Unix-like systems: displaying thumbnails
              3.2.14. [66]Unix-like systems: running arbitrary commands on
                      result files

              3.2.15. [67]Keyboard shortcuts
              3.2.16. [68]Search tips

                    [69]Terms and search expansion
                    [70]Working with phrases and proximity
                    [71]Others

              3.2.17. [72]Customising the search interface

                    [73]Choosing the viewer applications
                    [74]The GUI preferences dialog
                    [75]The result list format

              3.2.18. [76]The recoll GUI command line options

        3.3. [77]Searching with the KDE KIO slave
        3.4. [78]Searching on the command line
        3.5. [79]The query language

              3.5.1. [80]General syntax
              3.5.2. [81]Special field-like specifiers
              3.5.3. [82]Range clauses
              3.5.4. [83]Modifiers

        3.6. [84]Wildcards and anchored searches

              3.6.1. [85]Wildcards
              3.6.2. [86]Anchored searches

        3.7. [87]Using Synonyms
        3.8. [88]Path translations
        3.9. [89]Search case and diacritics sensitivity
        3.10. [90]Desktop integration

   4. [91]Programming interface

        4.1. [92]Writing a document input handler

              4.1.1. [93]Simple input handlers
              4.1.2. [94]"Multiple" handlers
              4.1.3. [95]Telling Recoll about the handler
              4.1.4. [96]Input handler output
              4.1.5. [97]Page numbers

        4.2. [98]Field data processing
        4.3. [99]Python API

              4.3.1. [100]Introduction
              4.3.2. [101]Interface elements
              4.3.3. [102]Log messages for Python scripts
              4.3.4. [103]Python search interface

                    [104]The recoll module
                    [105]The rclextract module
                    [106]Search API usage example
                    [107]The fsudi module

              4.3.5. [108]Python indexing interface

                    [109]Recoll external indexers
                    [110]The Python indexing API
                    [111]External indexers configuration
                    [112]External indexer samples
                    [113]Using an external indexer index in conjunction
                            with a regular one

   5. [114]Configuration

        5.1. [115]Settings, configuration overview
        5.2. [116]Environment variables
        5.3. [117]Recoll main configuration file, recoll.conf

              5.3.1. [118]Parameters affecting what documents we index
              5.3.2. [119]Parameters affecting how we generate terms and
                      organize the index

              5.3.3. [120]Parameters affecting where and how we store
                      things

              5.3.4. [121]Parameters affecting indexing performance and
                      resource usage

              5.3.5. [122]Miscellaneous parameters
              5.3.6. [123]Query-time parameters (no impact on the index)
              5.3.7. [124]Parameters for the PDF handler
              5.3.8. [125]Parameters for the ZIP file handler
              5.3.9. [126]Parameters for the Org mode handler
              5.3.10. [127]Parameters for the Thunderbird mbox handler
              5.3.11. [128]Parameters for OCR processing
              5.3.12. [129]Parameters for running speech to text
                      conversion

        5.4. [130]The fields file

              5.4.1. [131]Extended attributes in the fields file

        5.5. [132]The mimemap file
        5.6. [133]The mimeconf file
        5.7. [134]The mimeview file
        5.8. [135]The ptrans file
        5.9. [136]Examples of configuration adjustments

              5.9.1. [137]Adding an external viewer for an non-indexed
                      type

              5.9.2. [138]Adding indexing support for a new file type

   I. [139]Appendices

        A. [140]Processing of wild card and other special characters

              A.1. [141]Words and spans
              A.2. [142]Special ASCII characters during indexing

                    A.2.1. [143]Characters with hard-coded processing
                    A.2.2. [144]Characters generally treated as white
                            space

                    A.2.3. [145]Backslash

              A.3. [146]Special ASCII characters at search time

                    A.3.1. [147]Query language special characters
                    A.3.2. [148]Wild card characters

        B. [149]Building and Installation

              B.1. [150]Installing a binary copy
              B.2. [151]Supporting packages
              B.3. [152]Building from source

                    B.3.1. [153]Prerequisites
                    B.3.2. [154]Building
                    B.3.3. [155]Installing
                    B.3.4. [156]Python API package

   List of Tables

   3.1. [157]Keyboard shortcuts

Chapter 1. Introduction

   This document introduces full text search notions and describes the
   installation and use of the Recoll application. It is updated for
   Recoll 1.43.1.

Recoll on Windows

   Recoll was for a long time dedicated to Unix-like systems. It was only
   later ported to Microsoft Windows. Many references in this manual,
   especially file locations, are specific to Unix, and not valid on
   Windows, where some described features are also not available. The
   manual will be updated in time. Until this is completed: on Windows,
   most references to shared files can be translated by looking under the
   Windows Recoll installation directory (typically C:/Program
   Files/Recoll). In particular, anything referenced inside /usr/share in
   this document will be found in the Share subdirectory of the
   installation. The user configuration is stored by default under
   AppData/Local/Recoll inside the user directory, along with the index
   itself.

1.1. Giving it a try

   If you do not like reading manuals (who does?) but wish to give Recoll
   a try, just [158]install the application and start the recoll graphical
   user interface (GUI), which will ask permission to index your home
   directory, allowing you to search immediately after indexing completes.

   Do not do this if your home directory contains a huge number of
   documents and you do not want to wait or are very short on disk space.
   In this case, you may first want to customise the [159]configuration to
   restrict the indexed area. From the recoll GUI go to: Preferences →
   Indexing configuration, then adjust the Start folders section (named
   Top directories in older Recoll versions), which defines the
   directories from which the filesystem exploration starts.
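
   If you prefer editing the configuration directly, the Start folders
   setting corresponds to the topdirs variable in the recoll.conf file of
   the configuration directory (see the configuration chapter). A minimal
   sketch, with example folder names:

     # ~/.recoll/recoll.conf -- restrict indexing to a few folders
     # instead of the whole home directory (paths are just examples)
     topdirs = ~/Documents ~/Mail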

   By default, the indexer process writes all errors to its stderr output,
   which may be lost if you started the GUI from the desktop. You may find
   it useful to assign a file name to the Indexer log file name entry on
   the above indexing preferences screen. With the default level of 3,
   this will list all processed documents, and all errors (lines beginning
   with :2:).
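
   The same setting can also be made in the configuration file. A sketch,
   assuming the usual recoll.conf parameter names (to be checked against
   the configuration chapter), with an example log path:

     # recoll.conf sketch -- send indexer messages to a file
     logfilename = /tmp/recollindex.log
     loglevel = 3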

   On Unix-like systems, you may need to install the appropriate
   [160]supporting applications for document types that need them (for
   example antiword for Microsoft Word files). The Windows package is
   self-contained and includes most useful auxiliary programs. After
   indexing has run, the recoll GUI Tools → Missing helpers menu entry will
   show a list of missing supporting applications for the documents found
   in the indexed area.
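
   As an illustration, on a Debian-like system a few commonly needed
   helpers could be installed as follows (antiword is the one named
   above; the other package names are typical examples and vary between
   distributions):

     sudo apt install antiword poppler-utils unrtf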

1.2. Full text search

   Recoll is a full text search application, which means that it finds
   your data by content rather than by external attributes (like the file
   name). You specify words (terms) which should or should not appear in
   the text you are looking for, and receive in return a list of matching
   documents, ordered so that the most relevant documents will appear
   first.

   You do not need to remember in what file or email message you stored a
   given piece of information. You just ask for related terms, and the
   tool will return a list of documents where these terms are prominent,
   in a similar way to Internet search engines.

   Full text search applications try to determine which documents are most
   relevant to the search terms you provide. Computer algorithms for
   determining relevance can be very complex, and in general are inferior
   to the power of the human mind to rapidly determine relevance. The
   quality of relevance guessing is probably the most important aspect
   when evaluating a search application. Recoll relies on the Xapian
   probabilistic information retrieval library to determine relevance.

   In many cases, you are looking for all the forms of a word, including
   plurals, different tenses for a verb, or terms derived from the same
   root or stem (example: floor, floors, floored, flooring...). Queries
   are usually automatically expanded to all such related terms (words
   that reduce to the same stem). This expansion can be disabled when
   searching for a specific form.

   Stemming, by itself, does not account for misspellings or phonetic
   searches. A full text search application may also support this form of
   approximation. For example, a search for aliterattion returning no
   result might propose alliteration, alteration, alterations, or
   altercation as possible replacement terms. Recoll bases its suggestions
   on the actual index contents, so that suggestions may be made for words
   which would not appear in a standard dictionary.

1.3. Recoll overview

   Recoll uses the [161]Xapian information retrieval library as its
   storage and retrieval engine. Xapian is a very mature package using
   [162]a sophisticated probabilistic ranking model.

   The Xapian library manages an index database which describes where
   terms appear in your document files. It efficiently processes the
   complex queries which are produced by the Recoll query expansion
   mechanism, and is in charge of the all-important relevance computation
   task.

   Recoll provides the mechanisms and interface to get data into and out
   of the index. This includes translating the many possible document
   formats into pure text, handling term variations (using Xapian
   stemmers) and spelling approximations (using the aspell speller),
   interpreting user queries, and presenting results.

   In short, Recoll does the dirty footwork, while Xapian deals with the
   intelligent parts of the process.

   The Xapian index can be big (roughly the size of the original document
   set), but it is not a document archive. Recoll can only fully display
   documents that still exist at the place from which they were indexed.
   However, recent Recoll versions do store the plain text from all indexed
   documents.

   Recoll stores all internal data in Unicode UTF-8 format, and it can
   index many types of files with different character sets, encodings, and
   languages into the same index. It can process documents embedded inside
   other documents (for example a PDF document stored inside a Zip archive
   sent as an email attachment...), down to an arbitrary depth.

   East Asian texts are difficult to segment into words. By default,
   Recoll processes them by generating terms as arbitrary sequences of
   consecutive characters (n-grams). However, it has provisions to
   integrate with language-aware text segmenters for [163]Chinese and
   [164]Korean which will produce a smaller index and improved search.

   Stemming is the process by which Recoll reduces words to their radicals
   so that searching does not depend, for example, on a word being
   singular or plural (floor, floors), or on a verb tense (flooring,
   floored). Because the mechanisms used for stemming depend on the
   specific grammatical rules for each language, there is a separate
   Xapian stemmer module for most common languages where stemming makes
   sense.

   Recoll stores the unstemmed versions of terms in the main index and
   uses auxiliary databases for term expansion (one for each stemming
   language), which means that you can switch stemming languages between
   searches, or add a language without needing a full reindex.

   Storing documents written in different languages in the same index is
   possible, and commonly done. In this situation, you can specify several
   stemming languages for the index.
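
   In the configuration file, this is a space-separated list of language
   names. A sketch, assuming the usual recoll.conf parameter name (check
   the configuration chapter for the exact spelling):

     # recoll.conf sketch -- expand queries using several stemmers
     indexstemminglanguages = english french german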

   Recoll currently makes no attempt at automatic language recognition,
   which means that the stemmer will sometimes be applied to terms from
   other languages with potentially strange results. In practice, even if
   this introduces possibilities of confusion, this approach has proven
   quite useful, and it is much less cumbersome than separating
   your documents according to what language they are written in.

   By default, Recoll strips most accents and diacritics from terms, and
   converts them to lower case before either storing them in the index or
   searching for them. As a consequence, it is impossible to search for a
   particular capitalization of a term (US / us), or to discriminate two
   terms based on diacritics (sake / saké, mate / maté).

   Recoll can optionally store the raw terms, without accent stripping or
   case conversion. In this configuration, default searches will behave as
   before, but it is possible to perform searches sensitive to case and
   diacritics. This is described in more detail in the section about
   [165]index case and diacritics sensitivity.

   Recoll uses many parameters to define exactly what to index, and how to
   classify and decode the source documents. These are kept in
   [166]configuration files. A default configuration is copied into a
   standard location (usually something like /usr/share/recoll/examples)
   during installation. The default values set by the configuration files
   in this directory may be overridden by values set inside your personal
   configuration. With the default configuration, Recoll will index your
   home directory with generic parameters. Most common parameters can be
   set by using configuration menus in the recoll GUI. Some less common
   parameters can only be set by editing the text files.

   The [167]indexing process is started automatically (after asking
   permission) the first time you execute the recoll GUI. Index updating
   or rebuild can later be performed by executing the recollindex command,
   in a command window, or from the GUI File menu. Recoll indexing is
   multithreaded by default (except on Windows) when appropriate hardware
   resources are available, and can perform multiple tasks in parallel for
   text extraction, segmentation and index updates.

   [168]Searches are usually performed inside the recoll GUI, which has
   many options to help you find what you are looking for. However, there
   are other ways to query the index:
     * A [169]command line interface.
     * The [170]Recoll WebUI.
     * A Gnome Shell [171]Search Provider.
     * A [172]Python programming interface (see the sketch below).
     * [173]KDE KIO worker and krunner modules.
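
   As a quick illustration of the Python interface (described in detail
   in the programming chapter), here is a minimal query sketch; the
   search terms and the printed fields are just examples:

     from recoll import recoll

     # Open the default index; a specific configuration directory could
     # be selected with recoll.connect(confdir="...").
     db = recoll.connect()
     query = db.query()
     nres = query.execute("some search terms")
     for i in range(min(nres, 10)):
         doc = query.fetchone()
         print(doc.url, doc.title)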

Chapter 2. Indexing

2.1. Introduction

   Indexing is the process by which the set of documents is analyzed and
   the data entered into the database. Recoll indexing is normally
   incremental: documents will only be processed if they have been
   modified since the last run. On the first execution, all documents will
   need processing. A full index build can be forced later by specifying
   an option to the indexing command (recollindex -z or -Z).
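
   As a quick reference, typical command lines look as follows (see the
   recollindex manual page for the precise semantics of the options):

     recollindex        # incremental update: only changed documents
     recollindex -z     # erase the index, then rebuild it from scratch
     recollindex -Z     # force an update of all documents, in place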

   recollindex skips files which caused an error during a previous pass.
   This is a performance optimization, and the command line option -k can
   be set to retry failed files, for example after updating an input
   handler.

   When a file has been deleted, recollindex removes the corresponding
   data from the index. The exact moment when this happens depends on the
   indexing mode. There are provisions to [174]avoid deleting data for an
   unmounted removable volume.

   The following sections give an overview of different aspects of the
   indexing process and its configuration, with links to detailed sections.

   Depending on your data, temporary files may be created during indexing,
   some of them possibly quite big. You can set the RECOLL_TMPDIR
   environment variable to determine where they are created. If
   RECOLL_TMPDIR is not set, Recoll will fall back to other locations
   depending on the system. On Unix-like and MacOS systems, TMPDIR, TMP
   and TEMP will be tried before falling back to /tmp/. On Windows,
   Recoll will call the GetTempPath() function. Using the system's normal
   mechanism instead of RECOLL_TMPDIR has the nice property that the
   auxiliary commands executed by recollindex should then create their own
   temporary files in the same location.
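
   For example, on a Unix-like system, a single indexing run can be
   pointed at a roomier location like this (the path is just an example
   and should already exist):

     RECOLL_TMPDIR=/var/tmp/recoll-tmp recollindex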

2.1.1. Indexing modes

   Recoll indexing can be performed along two main modes:
     * [175]Periodic (or batch) indexing.  recollindex is executed at
       discrete times. On Unix-like systems, the typical usage is to have
       a nightly run [176]programmed into your cron file. On Windows, the
       Task Scheduler can be used to run indexing. In both cases, the
       Recoll GUI includes a simplified interface to configure the system
       scheduler.
     * [177]Real time indexing.  recollindex runs permanently as a daemon
       and uses a file system alteration monitor (e.g. inotify on
       Unix-like systems) to detect file changes. New or updated files are
       indexed at once. Monitoring a big file system tree can consume
       significant system resources.

Choosing an indexing mode

   The choice between the two methods is mostly a matter of preference,
   and they can be combined by setting up multiple indexes (e.g.: use
   periodic indexing on a big documentation directory, and real time
   indexing on a small home directory), or by [178]configuring the index
   so that only a subset of the tree will be monitored.

   The choice of method and the parameters used can be configured from the
   recoll GUI: Preferences → Indexing schedule dialog.

2.1.2. The index configuration directory

   A Recoll index is defined by its configuration directory. A
   configuration directory contains [179]several files which describe what
   should be indexed and how.

   When recoll or recollindex is first executed, it creates a default
   configuration directory, located in $HOME/.recoll/ on Unix-like systems
   and MacOS systems and %LOCALAPPDATA%/Recoll on Windows (typically
   C:/Users/[me]/Appdata/Local/Recoll).

   All configuration parameters have defaults, defined in system-wide
   files. Without further customisation, the default configuration will
   process your complete home directory, with a reasonable set of
   defaults. It can be adjusted to process different areas of the file
   storage, select files in different ways, and many other things.

   The index configuration can be edited either by using the recoll GUI
   Preferences->Index configuration dialog, or by directly editing the
   configuration files.

   A single index can process data from any subset of the computer
   accessible storage, as defined by the configuration variables. The most
   important one is topdirs (Start directories in the GUI), which defines
   the directories to be recursively indexed. Its default value is ~,
   which translates to your home directory.
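
   As an illustration, a hand-edited recoll.conf could limit indexing to
   a few chosen areas (the directory names below are arbitrary examples):
topdirs = ~/Documents ~/projects /shared/doc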

   In some cases, it may be useful to create additional configuration
   directories, for example, to separate personal and shared indexes, or
   to take advantage of the organization of your data to improve search
   precision. See the section about [180]configuring multiple indexes for
   more detail.

2.1.3. Document types

   Recoll knows about quite a few different document types. The parameters
   for document types recognition and processing are set in
   [181]configuration files.

   Most file types, like HTML or word processing files, only hold one
   document. Some file types, like email folders or zip archives, can hold
   many individually indexed documents, which may themselves be compound
   ones. Such hierarchies can go quite deep, and Recoll can process, for
   example, a LibreOffice document stored as an attachment to an email
   message inside an email folder archived in a zip file...

   recollindex processes plain text, HTML, OpenDocument
   (Open/LibreOffice), email formats, and a few others internally.

   Other file types (e.g.: postscript, pdf, ms-word, rtf ...) need
   external applications for preprocessing. The list is in the
   [182]installation section. After every indexing operation, Recoll
   updates a list of commands that would be needed for indexing existing
   files types. This list can be displayed by selecting the menu option
   File → Show Missing Helpers in the recoll GUI. It is stored in the
   missing text file inside the configuration directory.

   After installing a missing handler, you may need to tell recollindex to
   retry the failed files, by adding option -k to the command line, or by
   using the GUI File → Special indexing menu. This is because
   recollindex, in its default operation mode, will not retry files which
   caused an error during an earlier pass. In special cases, it may be
   useful to reset the data for a category of files before indexing. See
   the recollindex manual page. If your index is not too big, it may be
   simpler to just reset it.

   By default, Recoll will try to index any file type that it has a way to
   read. This is sometimes not desirable, and there are ways to either
   exclude some types, or on the contrary define a positive list of types
   to be indexed. In the latter case, any type not in the list will be
   ignored. A detailed description of the parameters involved can be found
   in the [183]document selection section of this manual.

   For example, to define an exclusive list of MIME types to be indexed,
   you would set the [184]indexedmimetypes configuration variable:
indexedmimetypes = text/html application/pdf

   It is possible to redefine a parameter for subdirectories. Example:
[/path/to/my/dir]
indexedmimetypes = application/pdf

   When using sections like this, don't forget that they remain in effect
   until the end of the file or another section indicator.

   As another example, excluding files by name can be done by adding
   wildcard name patterns to the [185]skippedNames list. Excluding by type
   can be done by setting the [186]excludedmimetypes value.
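
   For example (the patterns and MIME type below are only illustrations;
   note that assigning skippedNames replaces the default pattern list):
skippedNames = *.o *.so core
excludedmimetypes = image/vnd.djvu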

   Most parameters can be set either by editing the [187]configuration
   file (recoll.conf) for the index, or by using the GUI Index
   configuration menu.

Note about MIME types

   When editing the indexedmimetypes or excludedmimetypes lists, you
   should use the MIME values listed in the [188]mimemap file or in Recoll
   result lists rather than file -i output: there are a number of
   differences. The system command output should only be used for files
   without extensions, or for which the extension is not listed in
   mimemap.

2.1.4. Indexing failures

   Indexing may fail for some documents, for a number of reasons: a helper
   program may be missing, the document may be corrupt, we may fail to
   uncompress a file because no file system space is available, etc.

   The Recoll indexer does not retry failed files by default, because some
   indexing failures can be quite costly (for example failing to
   uncompress a big file because of insufficient disk space). Retrying
   will only occur if an explicit option (-k) is set on the recollindex
   command line, or if a script executed when recollindex starts up says
   so. The script is defined by a configuration variable
   (checkneedretryindexscript), and makes a rather lame attempt at
   deciding if a helper command may have been installed, by checking if
   any of the common bin directories have changed.

2.1.5. Recovery

   In the rare case where the index becomes corrupted (which can signal
   itself by weird search results or crashes), the index files need to be
   erased before restarting a clean indexing pass. Just delete the
   xapiandb directory (see [189]next section), or, alternatively, start
   the next recollindex with the -z option, which will reset the database
   before indexing. The difference between the two methods is that the
   second will not change the current index format, which may be
   undesirable if a newer format is supported by the Xapian version.

2.2. Index storage

   The default index location is the xapiandb subdirectory of the Recoll
   configuration directory, typically $HOME/.recoll/xapiandb/ on Unix-like
   systems or C:/Users/[me]/Appdata/Local/Recoll/xapiandb on Windows. This
   can be changed via two different methods (with different purposes):
     * For a given configuration directory, you can specify a non-default
       storage location for the index by setting the [190]dbdir parameter
       in the configuration file. Use this method to keep the
       configuration directory in its default location, and use another
       location for the index, typically because of disk occupation or
        performance reasons (see the example after this list).
     * You can specify a different configuration directory by setting the
       RECOLL_CONFDIR environment variable, or using the -c option to the
       Recoll commands. In turn, the related index would be stored either
       under the configuration directory, or elsewhere if dbdir is locally
       set. This method would typically be used in special cases to index
       different areas of the file system to different indexes. See the
       section about [191]configuring multiple indexes for more detail.
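
   As an illustration of the first method, a single line in the
   configuration file moves the index data to another location (the path
   is arbitrary):
dbdir = /data/recoll/xapiandb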

   There are quite a few more parameters which can be set in the
   configuration file itself for tailoring Recoll data storage. They are
   described in a [192]section of the configuration chapter.

   The size of the index is determined by the size of the set of
   documents, but the ratio can vary a lot. For a typical mixed set of
   documents, the index size will often be close to the data set size. In
   specific cases (a set of compressed mbox files for example), the index
   can become much bigger than the documents. It may also be much smaller
   if the documents contain a lot of images or other non-indexed data (an
   extreme example being a set of mp3 files where only the tags would be
   indexed).

   Of course, images, sound and video do not increase the index size,
   which means that in most cases, the space used by the index will be
   negligible compared to the total amount of data on the computer.

   The index data directory (xapiandb) only contains data that can be
   completely rebuilt by an index run (as long as the original documents
   exist), and it can always be destroyed safely.

2.2.1. Xapian index formats

   Xapian versions usually support several formats for index storage. A
   given major Xapian version will have a current format, used to create
   new indexes, and will also support the format from the previous major
   version.

   Xapian will not automatically convert an existing index from the older
   format to the newer one. If you want to upgrade to the new format, or
   if a very old index needs to be converted because its format is not
   supported any more, you will have to explicitly delete the old index
   (typically ~/.recoll/xapiandb), then run a normal indexing command.
   Using recollindex option -z would not work in this situation.
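
   On a Unix-like system with a default setup, the upgrade would look
   like this (the whole index is rebuilt, which may take a while):
rm -rf ~/.recoll/xapiandb
recollindex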

2.2.2. Security aspects

   The Recoll index holds almost complete copies of the indexed documents.
   If confidential data is indexed, access to the database directory must
   be restricted.

   Recoll will create the configuration directory with a mode of 0700 on
   Unix-like systems (access by owner only). As the index data directory
   is by default a sub-directory of the configuration directory, this
   should result in appropriate protection.

   If you use another setup, you should think of the kind of protection
   you need for your index, set the directory and files access modes
   appropriately, and also maybe adjust the umask used during index
   updates.

2.2.3. Special considerations for big indexes

   This only needs to concern you if your index is going to be bigger than
   around 10 GBytes. Most people have much smaller indexes. For reference,
   10 GBytes would be around 4000 bibles, a lot of text. If you have a
   huge text dataset (remember: images don't count, the text content of
   PDFs is typically less than 5% of the file size), read on.

   Recoll (thanks to Xapian) can manage huge indexes: in 2025, we heard of
   a 550 GB, 11+ million documents index. Big indexes just need a bit of
   thinking ahead and organisation (and appropriate hardware).

   The amount of writing performed by Xapian during index creation is not
   linear with the index size (it is somewhere between linear and
   quadratic). For big indexes this becomes a performance issue, and may
   even be an SSD disk wear issue.

   The problem can be mitigated by using the following approaches:
     * Partition the data set and create several indexes of smaller size
       rather than a huge one. These indexes can then be queried in
       parallel (using the Recoll external indexes facility), or merged
       using xapian-compact.
     * Have a lot of RAM available and set the idxflushmb Recoll
       configuration parameter as high as you can without swapping
       (experimentation will be needed). 200 would be a bare minimum in
        this context (see the example after this list).
     * Use Xapian 1.4.10 or newer, as this version brought a significant
       improvement in the amount of writes.
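
   For example, on a machine with plenty of RAM, the following
   recoll.conf setting could be a starting point for experimentation (the
   value is only an illustration):
idxflushmb = 1000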

   Recoll versions 1.38 and newer have an option to use [193]multiple
   temporary indexes and a final merge internally. This was designed as a
   CPU performance optimization (increasing parallelism), but it may also
   provide a simple solution for the index size issue, though it may not
   give enough control over the temporary indexes physical placement for
   really huge datasets.

2.3. Index configuration

   Variables stored inside the [194]Recoll configuration files control
   which areas of the file system are indexed, and how files are
   processed. The values can be set by editing the text files. Most of the
   more commonly used ones can also be adjusted by using the [195]dialogs
   in the recoll GUI.

   The first time you start recoll, you will be asked whether or not you
   would like it to build the index. If you want to adjust the
   configuration before indexing, just click Cancel at this point, which
   will get you into the configuration interface. If you exit at this
   point, recoll will have created a default configuration directory with
   empty configuration files, which you can then edit.

   The configuration is documented inside the [196]configuration chapter
   of this document, or in the [197]recoll.conf(5) manual page. Both
   documents are automatically generated from the comments inside the
   configuration file.

   The most immediately useful variable is [198]topdirs, which lists the
   subtrees and files to be indexed. The variable name is a bit misleading
   for native English speakers, so the corresponding GUI label is Start
   folders.

   The applications needed to index file types other than text, HTML or
   email (e.g.: pdf, postscript, ms-word...) are described in the
   [199]external packages section.

   There are two incompatible types of Recoll indexes, depending on the
   treatment of character case and diacritics. A [200]further section
   describes the two types in more detail. The default type is appropriate
   in most cases.

2.3.1. The index configuration GUI

   Most index configuration parameters can be set from the recoll GUI (set
   RECOLL_CONFDIR or use the -c option to affect a non-default index.)

   The interface is started from the Preferences → Index Configuration
   menu entry. It is divided into four tabs, Global parameters, Local
   parameters, Web history ([201]details) and Search parameters.

   The Global parameters tab allows setting global variables, like the
   lists of top/start directories, skipped paths, or stemming languages.

   The Local parameters tab allows setting variables that can be redefined
   for subdirectories. This second tab has an initially empty list of
   customisation directories, to which you can add. The variables are then
   set for the currently selected directory (or at the top level if the
   empty line is selected).

   The Search parameters section defines parameters which are used at
   query time, but are global to an index and affect all search tools, not
   only the GUI.

   The meaning for most entries in the interface is self-evident and
   documented by a ToolTip popup on the text label. For more detail, you
   may need to refer to the [202]configuration section of this guide.

   The configuration tool normally respects the comments and most of the
   formatting inside the configuration file, so that it is quite possible
   to use it on hand-edited files, which you might nevertheless want to
   back up first...

2.3.2. Multiple indexes

   Multiple Recoll indexes can be created by using several configuration
   directories which would typically be set to index different areas of
   the file system.

   A plausible usage scenario for the multiple index feature would be for
   a system administrator to set up a central index for shared data, that
   you choose to search or not in addition to your personal data. Of
   course, there are other possibilities; for example, there are many
   cases where you know the subset of files that should be searched, and
   where narrowing the search can improve the results. You can achieve
   approximately the same effect by using a directory filter clause in a
   search, but multiple indexes may have better performance and may be
   worth the trouble with huge data sets.

   A more advanced use case would be to use multiple indexes to improve
   indexing performance, by updating several indexes in parallel (using
   multiple CPU cores and disks, or possibly several machines), and then
   either merging them, or querying them in parallel.

   A specific configuration can be selected by setting the RECOLL_CONFDIR
   environment variable or giving the -c option to recoll and recollindex.

   The recollindex program, used for creating or updating indexes, always
   works on a single index. The different configurations are entirely
   independent (no parameters are ever shared between configurations when
   indexing).

   All the search interfaces (recoll, recollq, the Python API, etc.)
   operate with a main configuration, from which both configuration and
   index data are used, and can also query data from multiple additional
   indexes. Only the index data from additional indexes is used, their
   configuration parameters are ignored. This implies that some parameters
   should be consistent among index configurations which are to be used
   together.

   When searching, the current main index (defined by RECOLL_CONFDIR or
   -c) is always active. If this is undesirable, you can set up your base
   configuration to index an empty directory.

   Index configuration parameters can be set either by using a text editor
   on the files, or, for most parameters, by using the [203]recoll index
   configuration GUI. In the latter case, the configuration directory for
   which parameters are modified is the one which was selected by
   RECOLL_CONFDIR or the -c parameter, and there is no way to switch
   configurations within the GUI.

   See the [204]configuration section for a detailed description of the
   parameters.

   Some configuration parameters must be consistent among a set of
   multiple indexes used together for searches. Most importantly, all
   indexes to be queried concurrently must have the same option concerning
   character case and diacritics stripping, but there are other
   constraints. Most of the relevant parameters affect the [205]term
   generation.

   Using multiple configurations implies a small level of command line or
   file manager usage. The user must explicitly create additional
   configuration directories, the GUI will not do it. This is to avoid
   mistakenly creating additional directories when an argument is
   mistyped. Also, the GUI or the indexer must be launched with a specific
   option or environment to work on the right configuration.

   To start a new configuration, you need to create an empty directory in
   a location of your choice, and then instruct recoll or recollindex to
   use it by setting either a command line option (-c /some/directory), or
   an environment variable (RECOLL_CONFDIR=/some/directory). Any
   modification performed by the commands (e.g. configuration
   customisation or searches by recoll or index creation by recollindex)
   would then apply to the new directory and not to the default one.

Creating and using an additional index: Unix-like systems example

   The following applies to Unix-like systems

   Initially creating the configuration and index:
mkdir /path/to/my/new/config

   Configuring the new index can be done from the recoll GUI, launched
   from the command line to pass the -c option (you could create a desktop
   file to do it for you), and then using the [206]GUI index configuration
   tool to set up the index.
recoll -c /path/to/my/new/config

   Alternatively, you can just start a text editor on the main
   configuration file:
someEditor /path/to/my/new/config/[207]recoll.conf

   Creating and updating the index can be done from the command line:
recollindex -c /path/to/my/new/config

   or from the File menu of a GUI launched with the same option (recoll,
   see above).

   The same GUI would also let you set up batch indexing for the new
   index. Real time indexing can only be set up from the GUI for the
   default index (the menu entry will be inactive if the GUI was started
   with a non-default -c option).

   The new index can be queried alone with:
recoll -c /path/to/my/new/config

   Or, in parallel with the default index, by starting recoll without a -c
   option, and using the External Indexes tab in the preferences dialog,
   which can be reached either through: Preferences → GUI Configuration →
   External Index Dialog or Query → External index dialog. See the
   [208]GUI external indexes section for more details.

Creating an alternate index: Windows example

   When running Recoll under Windows, the simplest approach for using
   separate indexes is to start the GUI from different desktop icons. The
   following steps can be used:
    1. Create an empty folder somewhere for holding the new configuration
       and index.
    2. Select the Recoll icon on the desktop and Copy/Paste it. If no
       desktop icon was created during installation, you can right-drag
       the recoll.exe program from C:\Program Files\Recoll to the desktop
       and select Create shortcuts here to create one.
    3. Right-click the new shortcut and go to the Properties->shortcut tab
    4. Modify the Target value from the original C:\Program
       Files\Recoll\recoll.exe to something like:
"C:\Program Files\Recoll\recoll.exe" -c C:\Path\To\My\New\Directory
        Use double quotes around the directory path if it contains spaces.
    5. Then save the new icon by clicking OK, and double-click it to start
       a Recoll GUI for the new configuration. You should be presented
       with the initial configuration dialog.

   Any other method for running the GUI or recollindex program with a -c
   option or a RECOLL_CONFDIR value in the environment would work too.

2.3.3. Index case and diacritics sensitivity

   You have a choice of building an index with terms stripped of character
   case and diacritics, or one with raw terms. For a source term of
   Résumé, the former will store resume, the latter Résumé.

   Each type of index allows performing searches insensitive to case and
   diacritics: with a raw index, the user entry will be expanded to match
   all case and diacritics variations present in the index. With a
   stripped index, the search term will be stripped before searching.

   A raw index allows using case and diacritics to discriminate between
   terms, e.g., returning different results when searching for US and us
   or resume and résumé. Read the [209]section about search case and
   diacritics sensitivity for more details.

   The type of index to be created is controlled by the indexStripChars
   configuration variable which can only be changed by editing the
   configuration file. Any change implies an index reset (not automated by
   Recoll), and all indexes in a search must be set in the same way
   (again, not checked by Recoll).

   Recoll creates a stripped index by default if indexStripChars is not
   set.
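
   For example, to build a raw index instead, you would set the following
   in recoll.conf before creating (or resetting) the index:
indexStripChars = 0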

   As a cost for added capability, a raw index will be slightly bigger
   than a stripped one (around 10%). Also, searches will be more complex,
   so probably slightly slower, and the feature is relatively little used,
   so that a certain amount of weirdness cannot be excluded.

   One of the most adverse consequences of using a raw index is that some
   phrase and proximity searches may become impossible: because each term
   needs to be expanded, and all combinations searched for, the
   multiplicative expansion may become unmanageable.

2.4. Indexing performance and resource usage

2.4.1. Indexing threads configuration (Unix-like systems)

   Note: you probably don't need to read this. The default automatic
   configuration is fine in most cases. Only the part about disabling
   multithreading may be more commonly useful, so I'll give it here first.
   In recoll.conf:
thrQSizes = -1 -1 -1

   The Recoll indexing process recollindex can use multithreading to speed
   up indexing on multiprocessor systems. This is currently enabled on
   MacOS and other Unix-like systems, but not under Windows.

   The data processing used to index files is divided into several stages
   and some of the stages can be executed by multiple threads. The stages
   are:
    1. File system walking: this is always performed by the main thread.
    2. File conversion and data extraction.
    3. Text processing (splitting, stemming, etc.).
    4. Xapian index update.

   You can also read a [210]longer document about the transformation of
   Recoll indexing to multithreading.

   The threads configuration is controlled by two configuration file
   parameters.

   thrQSizes
          This variable defines the job input queues configuration. There
          are three possible queues for stages 2, 3 and 4, and this
          parameter should give the queue depth for each stage (three
          integer values). If a value of -1 is used for a given stage, no
          queue is used, and the thread will go on performing the next
          stage. In practice, deep queues have not been shown to increase
          performance. A value of 0 for the first queue tells Recoll to
          perform autoconfiguration (no need for anything else in this
          case, thrTCounts is not used) - this is the default
          configuration.

   thrTCounts
          This defines the number of threads used for each stage. If a
          value of -1 is used for one of the queue depths, the
          corresponding thread count is ignored. It makes no sense to use
          a value other than 1 for the last stage because updating the
          Xapian index is necessarily single-threaded (and protected by a
          mutex).

Note

   If the first value in thrQSizes is 0, thrTCounts is ignored.

   The following example would use three queues (of depth 2), and 4
   threads for converting source documents, 2 for processing their text,
   and one to update the index. This was tested to be the best
   configuration on the test system (quad-processor with multiple
   disks).
thrQSizes = 2 2 2
thrTCounts = 4 2 1

   The following example would use a single queue, and the complete
   processing for each document would be performed by a single thread
   (several documents will still be processed in parallel in most cases).
   The threads will use mutual exclusion when entering the index update
   stage. In practice, the performance would be close to the previous case
   in general, but worse in certain cases (e.g. a Zip archive would be
   processed purely sequentially), so the previous approach is preferred.
   YMMV... The last two values of thrTCounts are ignored.
thrQSizes = 2 -1 -1
thrTCounts = 6 1 1

   The following example would disable multithreading. Indexing will be
   performed by a single thread.
thrQSizes = -1 -1 -1

2.4.2. Using multiple temporary indexes to improve indexing time (1.41.1)

Note

   The underlying code is buggy between 1.38 and 1.41.0, fixed in 1.41.1.
   The bug affects the storing of document texts inside the index, so it
   only affects snippets generation inside result lists. If the result
   lists snippets are important to you, do not use the function with an
   affected release.

   In some cases, either when the input documents are simple and require
   little processing (e.g. HTML files), or possibly with a high number of
   available cores, the single-threaded Xapian index updates can become
   the performance bottleneck for indexing.

   In this case, it is possible to configure the indexer to use multiple
   temporary indexes which are merged at the end of the operation. This
   can provide a huge gain in performance, but, as opposed to
   multithreading for document preparation, it can also have a (slight)
   negative impact in some cases, so that it is not enabled by default.

   In most cases, this should also be turned off after the initial index
   creation is done, because it is extremely detrimental to the speed of
   small incremental updates.

   The parameter which controls the number of temporary indexes in
   recoll.conf is named thrTmpDbCnt. The default value is 0, meaning that
   no temporary indexes are used.
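
   For example, to try four temporary indexes (as in the experiment
   described below), you would set the following in recoll.conf:
thrTmpDbCnt = 4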

   If your document set is big, and you are using a processor with many
   cores for indexing, especially if the input documents are simple, it
   may be worth it to experiment with the value. For example, with a
   partial Wikipedia dump (many small HTML files), indexing times could be
   divided almost by three, by using four temporary indexes on a quad-core
   machine. More detail in this [211]article on the Recoll Web site.

   All the tests were performed on SSDs; it is quite probable that this
   approach would not work well on spinning disks, at least not in its
   current form.

2.4.3. Quieting down the indexing process

   The Recoll indexing process, recollindex is usually configured to have
   very low priority and not disturb other activity on the machine. Still,
   on an idle system, even with multithreading disabled, it will use 100%
   of one core if needed and available. This may be enough to get a laptop
   fan to spin up in some cases. To prevent this, we want to limit the CPU
   utilisation over each short time quantum (e.g. not more than 20 ms out
   of every 100 ms).

   This would be extremely difficult to do from inside the indexing
   process, because of the many places where intensive CPU usage takes
   place, some not under our control (Xapian or external helpers). On
   Linux systems, you can use the cgroup facility to throttle the process
   CPU usage. This is further documented on the [212]Recoll Web site.
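
   As one possible illustration (not necessarily the method documented on
   the Web site), on a systemd-based Linux system a throttled run could be
   launched through systemd-run, which uses cgroups internally; the 20%
   quota is an arbitrary value:
systemd-run --user --scope -p CPUQuota=20% recollindex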

2.5. Index update scheduling

2.5.1. Periodic indexing

Running the indexer

   The recollindex program performs index updates. You can start it either
   from the command line or from the File menu in the recoll GUI program.
   When started from the GUI, the indexing will run on the same
   configuration recoll was started on. When started from the command
   line, recollindex will use the RECOLL_CONFDIR variable or accept a -c
   confdir option to specify a non-default configuration directory.

   If the recoll program finds no index when it starts, it will
   automatically start indexing (except if canceled).

   The GUI File menu has entries to start or stop the current indexing
   operation. When indexing is not currently running, you have a choice
   between Update Index or Rebuild Index. The first choice only processes
   changed files, the second one erases the index before starting so that
   all files are processed.

   The GUI can also be used to manage the indexing operation. Stopping the
   indexer can be done from the recoll GUI File → Stop Indexing menu
   entry.

   On Unix-like systems, the recollindex indexing process can be
   interrupted by sending an interrupt (Ctrl-C, SIGINT) or terminate
   (SIGTERM) signal.

   When stopped, some time may elapse before recollindex exits, because it
   needs to properly write data to disk (flush), and close the index.

   After an interruption, the index will be somewhat inconsistent because
   some operations which are normally performed at the end of the indexing
   pass will have been skipped (for example, the stemming and spelling
   databases will be inexistent or out of date). You just need to restart
   indexing at a later time to restore consistency. The indexing will
   restart at the interruption point (the full file tree will be
   traversed, but files that were indexed up to the interruption and for
   which the index is still up to date will not be reindexed).

recollindex command line

   recollindex has many options which are listed in its [213]manual page.
   Only a few will be described here.

   Option -z will reset the index when starting. This is almost the same
   as destroying the index files (the nuance is that the Xapian format
   version will not be changed).

   Option -Z will force the update of all documents without resetting the
   index first. This will not have the "clean start" aspect of -z, but the
   advantage is that the index will remain available for querying while it
   is rebuilt, which can be a significant advantage if it is very big
   (some installations need days for a full index rebuild).

   Option -k will force retrying files which previously failed to be
   indexed, for example because of a missing helper program.

   Of special interest also, maybe, are the -i and -f options. -i allows
   indexing an explicit list of files (given as command line parameters or
   read on stdin). -f tells recollindex to ignore file selection
   parameters from the configuration. Together, these options allow
   building a custom file selection process for some area of the file
   system, by adding the top directory to the skippedPaths list and using
   an appropriate file selection method to build the file list to be fed
   to recollindex -if. Trivial example:
find . -name indexable.txt -print | recollindex -if

   recollindex -i will not descend into subdirectories specified as
   parameters, but just add them as index entries. It is up to the
   external file selection method to build the complete file list.

Unix-like systems: using cron to automate indexing

   The most common way to set up indexing is to have a cron task execute
   it every night. For example the following crontab entry would do it
   every day at 3:30AM (supposing recollindex is in your PATH):
30 3 * * * recollindex > /some/tmp/dir/recolltrace 2>&1

   Or, using anacron:
1  15  recollindex  su mylogin -c "recollindex > /tmp/rcltraceme 2>&1"

   The Recoll GUI has dialogs to manage crontab entries for recollindex.
   You can reach them from the Preferences → Indexing Schedule menu. They
   only work with the good old cron, and do not give access to all
   features of cron scheduling. Entries created via the tool are marked
   with a RCLCRON_RCLINDEX= marker so that the tool knows which entries
   belong to it. As a side effect, this sets an environment variable for
   the process, but it is not actually used; it is only a marker.

   The usual command to edit your crontab is crontab -e (which will
   usually start the vi editor to edit the file). You may have more
   sophisticated tools available on your system.

   Please be aware that there may be differences between your usual
   interactive command line environment and the one seen by crontab
   commands. Especially the PATH variable may be of concern. Please check
   the crontab manual pages about possible issues.

2.5.2. Real time indexing

   Real time monitoring/indexing is performed by starting the recollindex
   -m command. With this option, recollindex will permanently monitor file
   changes and update the index.

   On Windows systems, the monitoring process is started from the recoll
   GUI File menu. On Unix-like systems, there are other possibilities, see
   the following sections.

   When this is in use, the recoll GUI File menu makes two operations
   available: Stop and Trigger incremental pass.

   Trigger incremental pass has the same effect as restarting the indexer,
   and will cause a complete walk of the indexed area, processing the
   changed files, then switch to monitoring. This is only marginally
   useful, maybe in cases where the indexer is configured to delay
   updates, or to force an immediate rebuild of the stemming and phonetic
   data, which are only processed at intervals by the real time indexer.

   While it is convenient that data is indexed in real time, repeated
   indexing can generate a significant load on the system when files such
   as email folders change. Also, monitoring large file trees by itself
   significantly taxes system resources. You probably do not want to
   enable it if your system is short on resources. Periodic indexing is
   adequate in most cases.

   As of Recoll 1.24, you can set the [214]monitordirs configuration
   variable to specify that only a subset of your indexed files will be
   monitored for instant indexing. In this situation, an incremental pass
   on the full tree can be triggered by either restarting the indexer, or
   just running recollindex, which will notify the running process. The
   recoll GUI also has a menu entry for this.
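
   For example, to monitor only a mail folder for instant indexing, while
   the rest of the topdirs area is only processed when a full indexing
   pass runs (the path is an illustration):
monitordirs = ~/Mail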

Unix-like systems: automatic daemon start with systemd

   The installation contains two example files (in share/recoll/examples)
   for starting the indexing daemon with systemd.

   recollindex.service would be used for starting recollindex as a user
   service. The indexer will start when the user logs in and run while
   there is a session open for them.

   recollindex@.service is a template service which would be used for
   starting the indexer at boot time, running as a specific user. It can
   be useful when running the text search as a shared service (e.g. when
   users access it through the Web UI).

   If configured to do so, the unit files should have been installed in
   your system's default systemd paths (usually /usr/lib/systemd/system/
   and /usr/lib/systemd/user/). If not, you may need to copy the files
   there before starting the service.

   With the unit files installed in the proper location, the user unit can
   be started with the following commands:
systemctl --user daemon-reload
systemctl --user enable --now recollindex.service

   The system unit file can be enabled for a particular user by running,
   as root:
systemctl daemon-reload
systemctl enable --now recollindex@username.service

   (A valid user name should be substituted for username, of course.)

Unix-like systems: automatic daemon start from the desktop session

   Under KDE, Gnome and some other desktop environments, the daemon can be
   started automatically when you log in, by creating a desktop file
   inside the ~/.config/autostart directory. This can be done for you by
   the Recoll GUI. Use the Preferences->Indexing Schedule menu.

   With older X11 setups, starting the daemon is normally performed as
   part of the user session script.

   The rclmon.sh script can be used to easily start and stop the daemon.
   It can be found in the examples directory (typically
   /usr/local/[share/]recoll/examples).

   For example, a good old xdm-based session could have a .xsession script
   with the following lines at the end:
recollconf=$HOME/.recoll-home
recolldata=/usr/local/share/recoll
RECOLL_CONFDIR=$recollconf $recolldata/examples/rclmon.sh start
fvwm

   The indexing daemon gets started, then the window manager, for which
   the session waits.

   By default, the indexing daemon will monitor the state of the X11
   session and exit when it ends, so it is not necessary to kill it
   explicitly. (The X11 server monitoring can be disabled with option -x
   to recollindex.)

   If you use the daemon completely outside of an X11 session, you need to
   add option -x to disable X11 session monitoring (else the daemon will
   not start).

Miscellaneous details

   Logging.  By default, the messages from the indexing daemon will be
   sent to the same file as those from the interactive commands
   (logfilename). You may want to change this by setting the
   daemlogfilename and daemloglevel configuration parameters. Also the log
   file will only be truncated when the daemon starts. If the daemon runs
   permanently, the log file may grow quite big, depending on the log
   level.
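
   For example (the file name and level below are arbitrary
   illustrations):
daemlogfilename = /tmp/recoll-daemon.log
daemloglevel = 3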

   Unix-like systems: increasing resources for inotify.  On Linux systems,
   monitoring a big tree may need increasing the resources available to
   inotify, which are normally defined in /etc/sysctl.conf.
### inotify
#
# cat  /proc/sys/fs/inotify/max_queued_events   - 16384
# cat  /proc/sys/fs/inotify/max_user_instances  - 128
# cat  /proc/sys/fs/inotify/max_user_watches    - 16384
#
# -- Change to:
#
fs.inotify.max_queued_events=32768
fs.inotify.max_user_instances=256
fs.inotify.max_user_watches=32768

   In particular, you will need to trim your tree or adjust the
   max_user_watches value if indexing exits with a message about errno
   ENOSPC (28) from inotify_add_watch.

   Slowing down the reindexing rate for fast changing files.  When using
   the real time monitor, it may happen that some files need to be
   indexed, but change so often that they impose an excessive load for the
   system. Recoll provides a configuration option to specify the minimum
   time before which a file, specified by a wildcard pattern, cannot be
   reindexed. See the mondelaypatterns parameter in the [215]configuration
   section.
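
   As a hypothetical example, the following would prevent any .log file
   from being reindexed more than once per minute (each entry associates
   a wildcard pattern with a minimum delay in seconds):
mondelaypatterns = *.log:60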

2.6. Fields and metadata

   Apart from the main text content, documents usually aggregate other
   data elements, such as author names, dates, abstracts, etc. These
   are usually called metadata elements because they qualify or describe
   the data rather than being part of it. Recoll has a slightly more
   general notion of field to mean any named piece of data associated with
   a document.

   Fields are extracted by the document handlers when processing a
   document and further used by Recoll for searching or displaying
   results.

   Some fields, e.g. a file modification time, have a strict and
   predefined usage. For most fields though, the processing is entirely
   configurable and defined in the [216]fields configuration file.

   Fields have two main processing options (at least one of which will be
   set if they are processed at all):
     * Their content can be indexed. This makes them searchable.
     * Their content can be stored in the index as document attribute
       data. This makes them displayable as part of a result list entry.

   These options are preset in the default fields file for common elements
   like a title or an author name.

   The terms from indexed fields are stored in the inverted index with a
   specific prefix, which makes them searchable by specifying the field
   name (e.g. author:Balzac). The terms can optionally also be used for
   the main index section to provide hits for non-prefixed searches. This
   is decided by an attribute in the fields file.

   In most cases, field data is provided by the document itself, for
   example, by HTML <meta> elements. They can also be obtained from other
   sources, this is described in the following section.

2.6.1. Incorporating external metadata

Unix-like systems and MacOS systems: using extended attributes

   User extended attributes are named pieces of information that most
   modern file systems can attach to any file.

   Recoll processes all extended attributes as document fields. Note that
   most fields are not indexed by default; you need to activate them by
   defining a prefix in the [217]fields configuration file.
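
   As a hypothetical example, to make a user.project extended attribute
   searchable as project:someval, you could add a prefix definition to
   the fields file (the XYPROJ prefix name is arbitrary; custom prefixes
   conventionally begin with X):
[prefixes]
project = XYPROJ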

   A [218]freedesktop standard defines a few special attributes, which are
   handled as such by Recoll:

   mime_type
          If set, this overrides any other determination of the file MIME
          type.

   charset
          If set, this defines the file character set (mostly useful for
          plain text files).

   By default, other attributes are handled as Recoll fields of the same
   name, after removing the "user" prefix on Linux.

   The name translation can be configured more precisely, inside the
   [219]fields configuration file.

Setting the document modification/creation date

   Some documents have an internal date attribute (e.g. emails), but most
   get their date from the file modification time. It is possible to set a
   document date different from the file's by setting a specific extended
   attribute. For obscure and uninteresting reasons, the name of the
   attribute is hardcoded as modificationdate. Its contents should be the
   ASCII representation of a decimal integer representing the Unix time
   (seconds since the epoch). An example Linux command line for setting
   this particular field follows. The substituted date command converts
   the example date into Unix time format (seconds since the epoch).
setfattr -n user.modificationdate -v `date -d '2022-09-30 08:30:00' +%s` /some/file

   The date substitution will then be automatic, you do not need to
   customise the fields file.

Using a command for importing external metadata

   During indexing, it is possible to import metadata for each file by
   executing commands. This allows, for example, extracting tag data from
   an external application and storing it in a field for indexing.

   See the [220]section about the metadatacmds field in the main
   configuration chapter for a description of the configuration syntax.

   For example, if you would want Recoll to use tags managed by tmsu in a
   field named tags, you would add the following to the configuration
   file:
[/some/area/of/the/fs]
metadatacmds = ; tags = tmsu tags %f

   Note the initial semi-colon after the equal sign.

   You may want to restrict this processing to a subset of the directory
   tree, because it may slow down indexing a bit ([some/area/of/the/fs]).

   In the example above, the output of tmsu is used to set a field named
   tags. The field name is arbitrary and could be tmsu or myfield just the
   same, but tags is an alias for the standard Recoll keywords field, and
   the tmsu output will just augment its contents. This will avoid the
   need to extend the [221]field configuration.

Note

   Depending on the tmsu version, you may need/want to add options like
   --database=/some/db.

   After setting or updating the parameter, you will need to tell Recoll
   to reindex the affected files. Just reset the index or see recollindex
   options -e or -r.

   You will then be able to search the field from the query language:
   tags:some/alternate/values or tags:all,these,values.

   Tags changes will not be detected by the indexer if the file itself did
   not change. One possible workaround would be to update the file ctime
   when you modify the tags, which would be consistent with how extended
   attributes function. A pair of chmod commands could accomplish this, or
   a touch -a. Alternatively, just couple the tag update with a
   recollindex -e -i /path/to/the/file.
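
   As a sketch, a small wrapper script could combine the two steps (the
   tmsu command syntax is an assumption to be checked against your tmsu
   version; the file path and tag name are placeholders):
tmsu tag /path/to/the/file mytag
recollindex -e -i /path/to/the/file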

2.7. Miscellaneous indexing notes

2.7.1. Indexing punctuation characters (1.39)

   By default, the Recoll indexer treats most non-alphanumeric characters
   purely as separators, equivalent to white space, so that inputs like
   all words, and all,words produce the same terms.

   It may sometimes be useful to index some of these characters so that
   they can be used as discriminants for searches. This can be done by
   setting the indexedpunctuation configuration parameter. The value is an
   UTF-8 string, for example, setting:
indexedpunctuation = %€

   would allow searching separately for 100% or 100€.

   The affected characters are indexed as terms with their own term
   positions, and they are their own separators, so that 100% and 100 %
   would be equivalent inputs.

2.7.2. The PDF input handler

   The PDF format is very important for scientific and technical
   documentation, and document archival. It has extensive facilities for
   storing metadata along with the document, and these facilities are
   actually used in the real world.

   In consequence, the rclpdf.py PDF input handler has more complex
   capabilities than most others, and it is also more configurable,
   because some extra features require executing external commands and are
   therefore not enabled by default. Specifically, rclpdf.py has the
   following optional features:
     * It can extract PDF outlines and bookmarks.
     * It can be configured to extract specific metadata tags from an XMP
       packet.
     * It can extract PDF attachments.
     * It can automatically perform OCR if the document text is empty.
       This is done by executing an external program and is now described
       in a [222]separate section, because the OCR framework can also be
       used with non-PDF image files.

Extracting PDF outlines and bookmarks

   These data elements will be extracted if pdfoutline=1 is set in the
   configuration file and the pdftohtml command (from poppler-tools) is
   available. Executing the command takes extra time, which is why the
   feature is not enabled by default.

XMP fields extraction

   The rclpdf.py script in Recoll version 1.23.2 and later can extract XMP
   metadata fields by executing the pdfinfo command (usually found with
   poppler-utils). This is controlled by the [223]pdfextrameta
   configuration variable, which specifies which tags to extract and,
   possibly, how to rename them.
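
   For example, the following would extract two bibtex XMP tags
   (bibtex:pages is borrowed from the sample code below; bibtex:author is
   just an illustration):
pdfextrameta = bibtex:pages bibtex:author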

   The [224]pdfextrametafix variable can be used to designate a file with
   Python code to edit the metadata fields (available for Recoll 1.23.3
   and later. 1.23.2 has equivalent code inside the handler script).
   Example:
import sys
import re

class MetaFixer(object):
    def __init__(self):
        pass

    def metafix(self, nm, txt):
        if nm == 'bibtex:pages':
            txt = re.sub(r'--', '-', txt)
        elif nm == 'someothername':
            # do something else
            pass
        elif nm == 'stillanother':
            # etc.
            pass
        return txt

    def wrapup(self, metaheaders):
        pass

   If the 'metafix()' method is defined, it is called for each metadata
   field. A new MetaFixer object is created for each PDF document (so the
   object can keep state for, for example, eliminating duplicate values).
   If the 'wrapup()' method is defined, it is called at the end of XMP
   fields processing with the whole metadata as parameter, as an array of
   '(nm, val)' pairs, allowing an alternate approach for editing or
   adding/deleting fields.

   See [225]this page for a more detailed discussion about indexing PDF
   XMP properties.

PDF attachment indexing

   Indexing PDF attachments used to be done with the pdftk toolkit and was
   disabled by default. As of Recoll 1.43.1, it uses the Poppler pdfdetach
   command, and it is enabled in the default configuration. Set the
   [226]pdfattach configure variable to 0 to disable the feature.

   The PDF attachments are indexed as sub-documents of the PDF file.

2.7.3. Running OCR on image documents

   The Recoll PDF handler (rclpdf.py), and the alternate image handler
   (rclimg.py) have the ability to call an external OCR program (only as
   of Recoll 1.43.3 for the latter).

   The operation details are slightly different for PDF and other image
   documents.

   To enable the Recoll OCR feature, you need to install one of the
   supported OCR applications (tesseract or ABBYY), enable OCR in the PDF
   or image handler by setting the appropriate configuration parameter,
   and tell Recoll how to run the OCR by setting the specific OCR
   [227]configuration variables. All parameters can be localized in
   subdirectories through the usual main configuration mechanism (path
   sections).

   This facility got a major update in Recoll 1.26.5. Older versions had a
   more limited, non-caching capability to execute an external OCR program
   in the PDF handler. The new function has the following features:
     * The OCR output is cached, stored as separate files. The caching is
       ultimately based on a hash value of the original file contents, so
       that it is immune to file renames. A first path-based layer ensures
        fast operation for unchanged (unmoved) files, and the data hash
       (which is still orders of magnitude faster than OCR) is only
       re-computed if the file has moved. OCR is only performed if the
       file was not previously processed or if it changed.
     * The support for a specific OCR program is implemented in a simple
       Python module. It should be straightforward to add support for any
       OCR engine with a capability to run from the command line.
     * Modules initially exist for tesseract (Linux and Windows), and
       ABBYY FineReader (Linux, tested with version 11). ABBYY FineReader
        is a commercial closed source program, but it sometimes performs
       better than tesseract.

OCR for PDF documents

   It must be noted that, if modifying the files (or a copy) is
   acceptable, then using [228]OCRmyPDF to add a text layer to the PDF
   itself is a better solution than using the Recoll OCR feature: it
   allows Recoll to position the PDF viewer on the search target when
   opening the document, and permits secondary searches in the native
   tool.

   The Recoll OCR is enabled by the pdfocr configuration variable, and
   will only be executed if the processed file has no text content.

   Example configuration fragment in recoll.conf:
pdfocr = 1
ocrprogs = tesseract
tesseractlang = eng

   The pdfocr variable can be set globally or for specific subtrees.

OCR for image documents

   As of Recoll 1.43.3, the alternate Python rclimg.py handler can execute
   OCR on image files. The default image handler is the Perl-based rclimg
   script and has not been OCR-enabled. So, for performing image OCR, you
   need to tell Recoll to use the alternate handler and also to enable OCR
   by setting the imgocr variable.

   If you are running an older Recoll release, you can grab an up-to-date
   copy of rclimg.py from the [229]git repository. You will have to copy
   it to the Recoll filters/ directory and make it executable. The script
   needs to run from the installation directory because of how it runs the
   OCR script.

   Example configuration:

   In $RECOLL_CONFDIR/mimeconf (e.g. ~/.recoll/mimeconf):
[index]
image/gif = execm rclimg.py
image/jp2 = execm rclimg.py
image/jpeg = execm rclimg.py
image/png = execm rclimg.py
image/tiff = execm rclimg.py
image/x-nikon-nef = execm rclimg.py
image/x-xcf = execm rclimg.py

   Of course you can also only use a subset of the image types.

   In $RECOLL_CONFDIR/recoll.conf:
ocrprogs = tesseract
tesseractlang = eng
[/path/to/my/images/directory]
imgocr = 1

2.7.4. Running a speech to text program on audio files

   If the OpenAI Whisper program is available and the appropriate
   parameters set in the configuration files, the Recoll audio file
   handler will run speech to text recognition on audio files and the
   resulting text will be indexed. See the [230]the FAQ entry for more
   details.

   The results of the speech recognition will be cached in the same manner
   as the results of image OCR.

2.7.5. Removable volumes

   Recoll used to have no support for indexing removable volumes (portable
   disks, USB keys, etc.). Recent versions have improved the situation and
   support indexing removable volumes in two different ways:
     * By indexing the volume in the main, fixed, index, and ensuring that
       the volume data is not purged if the indexing runs while the volume
       is mounted. (since Recoll 1.25.2).
     * By storing a volume index on the volume itself (since Recoll 1.24).

Indexing removable volumes in the main index

   As of version 1.25.2, Recoll provides a simple way to ensure that the
   index data for an absent volume will not be purged. Two conditions must
   be met:
     * The volume mount point must be a member of the topdirs list.
     * The mount directory must be empty (when the volume is not mounted).

   If recollindex finds that one of the topdirs is empty when starting up,
   any existing data for the tree will be preserved by the indexing pass
   (no purge for this area).

Self contained volumes

   As of Recoll 1.24, it has become possible to build self-contained
   datasets including a Recoll configuration directory and index together
   with the indexed documents, and to move such a dataset around (for
   example copying it to a USB drive), without having to adjust the
   configuration for querying the index.

Note

   This is a query-time feature only. The index must only be updated in
   its original location. If an update is necessary in a different
   location, the index must be reset.

   The principle of operation is that the configuration stores the
   location of the original configuration directory, which must reside on
   the movable volume. If the volume is later mounted elsewhere, Recoll
   adjusts the paths stored inside the index by the difference between the
   original and current locations of the configuration directory.

   To make a long story short, here follows a script to create a Recoll
   configuration and index under a given directory (given as a single
   parameter). The resulting data set (files + recoll directory) can later
   be moved to a CDROM or thumb drive. Longer explanations come after
   the script.
#!/bin/sh

fatal()
{
    echo $*;exit 1
}
usage()
{
    fatal "Usage: init-recoll-volume.sh <top-directory>"
}

test $# = 1 || usage
topdir=$1
test -d "$topdir" || fatal $topdir should be a directory

confdir="$topdir/recoll-config"
test ! -d "$confdir" || fatal $confdir should not exist

mkdir "$confdir"
cd "$topdir"
topdir=`pwd`
cd "$confdir"
confdir=`pwd`

(echo topdirs = '"'$topdir'"'; \
    echo orgidxconfdir = $topdir/recoll-config) > "$confdir/recoll.conf"

recollindex -c "$confdir"

   The examples below will assume that you have a dataset under
   /home/me/mydata/, with the index configuration and data stored inside
   /home/me/mydata/recoll-confdir.

   In order to be able to run queries after the dataset has been moved,
   you must ensure the following:
     * The main configuration file must define the [231]orgidxconfdir
       variable to be the original location of the configuration directory
       (orgidxconfdir=/home/me/mydata/recoll-confdir must be set inside
       /home/me/mydata/recoll-confdir/recoll.conf in the example above).
     * The configuration directory must exist with the documents,
       somewhere under the directory which will be moved. E.g. if you are
       moving /home/me/mydata around, the configuration directory must
       exist somewhere below this point, for example
       /home/me/mydata/recoll-confdir, or
       /home/me/mydata/sub/recoll-confdir.
     * You should keep the default locations for the index elements which
       are relative to the configuration directory by default (principally
       dbdir). Only the paths referring to the documents themselves (e.g.
       topdirs values) should be absolute (in general, they are only used
       when indexing anyway).

   Only the first point needs an explicit user action, the Recoll defaults
   are compatible with the third one, and the second is natural.

   If, after the move, the configuration directory needs to be copied out
   of the dataset (for example because the thumb drive is too slow), you
   can set the [232]curidxconfdir variable inside the copied
   configuration to define the location of the moved one. For example if
   /home/me/mydata is now mounted onto /media/me/somelabel, but the
   configuration directory and index has been copied to /tmp/tempconfig,
   you would set curidxconfdir to /media/me/somelabel/recoll-confdir
   inside /tmp/tempconfig/recoll.conf. orgidxconfdir would still be
   /home/me/mydata/recoll-confdir in the original and the copy.
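
   With the paths from this example, the copied configuration
   (/tmp/tempconfig/recoll.conf) would thus contain:
orgidxconfdir = /home/me/mydata/recoll-confdir
curidxconfdir = /media/me/somelabel/recoll-confdir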

   If you are regularly copying the configuration out of the dataset, it
   will be useful to write a script to automate the procedure. This can't
   really be done inside Recoll because there are probably many possible
   variants. One example would be to copy the configuration to make it
   writable, but keep the index data on the medium because it is too big -
   in this case, the script would also need to set dbdir in the copied
   configuration.
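
   Here is a minimal sketch of such a script, reusing the example paths
   above and assuming the default index layout (xapiandb under the
   configuration directory); adapt the paths to your actual setup:
#!/bin/sh
# Copy the on-medium configuration to a writable local directory.
src=/media/me/somelabel/recoll-confdir
dst=/tmp/tempconfig
mkdir -p "$dst"
cp "$src/recoll.conf" "$dst/"
# Point the copy at the moved configuration, and keep using the index
# data which remains on the medium.
cat >> "$dst/recoll.conf" <<EOF
curidxconfdir = $src
dbdir = $src/xapiandb
EOF
# Queries can then be run with: recoll -c "$dst"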

   The same set of modifications (Recoll 1.24) has also made it possible
   to run queries from a read-only configuration directory (with slightly
   reduced functionality of course, such as not recording the query history).

2.7.6. Unix-like systems: indexing visited Web pages

   With the help of a Firefox extension, Recoll can index the Internet
   pages that you visit. The extension has a long history: it was
   initially designed for the Beagle indexer, then adapted to Recoll and
   the Firefox XUL API. The current version of the extension, located in
   the [233]Mozilla add-ons repository, uses the WebExtensions API, and
   works with current Firefox versions.

   The extension works by copying visited Web pages to an indexing queue
   directory, which Recoll then processes, storing the data into a local
   cache, then indexing it, then removing the file from the queue.

The local cache is not an archive

   As mentioned above, a copy of the indexed Web pages is retained by
   Recoll in a local cache (from which data is fetched for previews, or
   when resetting the index). The cache is not changed by an index reset,
   just read for indexing. The cache has a maximum size, which can be
   adjusted from the Index configuration / Web history panel
   (webcachemaxmbs parameter in recoll.conf). Once the maximum size is
   reached, old pages are erased to make room for new ones. The pages
   which you want to keep indefinitely need to be explicitly archived
   elsewhere. Using a very high value for the cache size can avoid data
   erasure, but see the 'Howto' page referenced below for more details and
   gotchas.

   The visited Web pages indexing feature can be enabled on the Recoll
   side from the GUI Index configuration panel, or by editing the
   configuration file (set processwebqueue to 1).
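
   In recoll.conf, this corresponds to something like the following (the
   cache size line is optional and the value shown is just an example):
processwebqueue = 1
webcachemaxmbs = 500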

   The Recoll GUI has a tool to list and edit the contents of the Web
   cache. (Tools → Webcache editor)

   The recollindex command has two options to help manage the Web cache:
     * --webcache-compact will recover the space from erased entries. It
       may need to use twice the disk space currently needed for the Web
       cache.
     * --webcache-burst destdir will extract all current entries into
       pairs of metadata and data files created inside destdir.
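
   For example (the destination directory used here is arbitrary):
recollindex --webcache-compact
recollindex --webcache-burst /tmp/webcache-dump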

   You can find more details on Web indexing, its usage and configuration
   in a [234]Recoll 'Howto' entry.

Chapter 3. Searching

3.1. Introduction

   Getting answers to specific queries is of course the whole point of
   Recoll. The multiple provided interfaces always understand simple
   queries made of one or several words, and return appropriate results in
   most cases.

   In order to make the most of Recoll though, it may be worthwhile to
   understand how it processes your input. Five different modes exist:
     * In All Terms mode, Recoll looks for documents containing all your
       input terms.
     * The Query Language mode behaves like All Terms in the absence of
       special input, but it can also do much more. This is the best mode
       for getting the most of Recoll. It is usable from all possible
       interfaces (GUI, command line, Web UI, ...), and is [235]described
       here.
     * In Any Term mode, Recoll looks for documents containing any of your
       input terms, preferring those which contain more.
     * In File Name mode, Recoll will only match file names, not content.
       Using a small subset of the index allows things like left-hand
       wildcards without performance issues, and may sometimes be useful.
     * The GUI Advanced Search mode is actually not more powerful than the
       query language, but it helps you build complex queries without
       having to remember the language, and avoids any interpretation
       ambiguity, as it bypasses the user input parser.

   These five input modes are supported by the different user interfaces
   which are described in the following sections.

3.2. Searching with the Qt graphical user interface (GUI)

   The recoll program provides the main user interface for searching. It
   is based on the Qt library.

   recoll has two search interfaces:
     * Simple search (the default, on the main screen) has a single entry
       field where you can enter multiple words or a query language query.
     * Advanced search (a panel accessed through the Tools menu or the
       toolbox bar icon) has multiple entry fields, which you may use to
       build a logical condition, with additional filtering on file type,
       location in the file system, modification date, and size.

   The Advanced Search tool is easier to use, but not actually more
   powerful, than the Simple Search in query language mode. Its name is
   historical, but Assisted Search would probably have been a better
   designation.

   In most text areas, you can enter the terms as you think them, even if
   they contain embedded punctuation or other non-textual characters (e.g.
   Recoll can handle things like email addresses).

   The main case where you should enter text differently from how it is
   printed is for East Asian languages (Chinese, Japanese, Korean). Words
   composed of single or multiple characters should be entered separated
   by white space in this case (they would typically be printed without
   white space).

   Some searches can be quite complex, and you may want to re-use them
   later, perhaps with some tweaking. Recoll can save and restore
   searches. See [236]Saving and restoring queries.

3.2.1. Simple search

    1. Start the recoll program.
    2. Possibly choose a search mode: Any term, All terms, File name or
       Query language.
    3. Enter search term(s) in the text field at the top of the window.
    4. Click the Search button or hit the Enter key to start the search.

   The initial default search mode is [237]Query language. Without special
   directives, this will look for documents containing all of the search
   terms (the ones with more terms will get better scores), just like the
   All Terms mode.

   Any term will search for documents where at least one of the terms
   appear.

   File name will exclusively look for file names, not contents.

   All search modes allow terms to be expanded with wildcard characters
   (*, ?, []). See the [238]section about wildcards for more details.

   In all modes except File name, you can search for exact phrases
   (adjacent words in a given order) by enclosing the input inside double
   quotes. Ex: "virtual reality".

   The Query Language features are described in [239]a separate section.

   When using a stripped index (the default), character case has no
   influence on search, except that you can disable stem expansion for any
   term by capitalizing it. E.g.: a search for floor will also normally
   look for flooring, floored, etc., but a search for Floor will only look
   for floor, in any character case. Stemming can also be disabled
   globally in the preferences. When using a raw index, [240]the rules are
   a bit more complicated.

   Recoll remembers the last few searches that you performed. You can
   directly access the search history by clicking the clock button on the
   right of the search entry, while the latter is empty. Otherwise, the
   history is used for entry completion (see next). Only the search texts
   are remembered, not the mode (all/any/file name).

   While text is entered in the search area, recoll will display possible
   completions, filtered from the history and the index search terms. This
   can be disabled with a GUI Preferences option.

   Double-clicking on a word in the result list or a preview window will
   insert it into the simple search entry field.

   You can cut and paste any text into an All terms or Any term search
   field, punctuation, newlines and all - except for wildcard characters
   (single ? characters are ok). Recoll will process it and produce a
   meaningful search. This is what most differentiates this mode from the
   Query Language mode, where you have to care about the syntax.

   The File name search mode will specifically look for file names. The
   point of having a separate file name search is that wildcard expansion
   can be performed more efficiently on a small subset of the index
   (allowing wildcards on the left of terms without excessive cost).
   Things to know:
     * White space in the entry should match white space in the file name,
       and is not treated specially.
     * The search is insensitive to character case and accents,
       independently of the type of index.
     * An entry without any wildcard character and not capitalized will be
       prepended and appended with '*' (e.g.: etc -> *etc*, but Etc ->
       etc).
     * If you have a big index (many files), excessively generic fragments
       may result in inefficient searches.

3.2.2. The filters panel (1.32)

   By default, the GUI displays the filters panel on the left of the
   results area. You can adjust the width of the panel, and hide it by
   squeezing it completely. The width will be memorized for the next
   session.

   The panel is only active in Query Language search mode, and it allows
   filtering by date or filesystem location, by adding date: and dir:
   clauses to the effective query text.
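
   For example, with both filters active, a query entered as mynotes could
   effectively become something like the following (the dates and the
   directory are hypothetical):
mynotes date:2023-01-01/2023-12-31 dir:/home/me/projects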

   The dates filter can be activated by clicking the checkbox. It has two
   assisted date entry widgets, for the minimum and maximum dates of the
   search period.

   The directory filter displays a subset of the filesystem directories,
   reduced to the indexed area, as defined by the topdirs list and the
   name exclusion parameters. Some directories may not be shown at all,
   depending on their (lack of) indexable content and other indexing
   parameters.

   By default, the depth of the displayed tree is limited to 2 levels
   under the start directories. You can change this in the GUI
   Preferences, User interface panel. Please note that increasing the
   depth can strongly delay the GUI startup, so you should experiment by
   single increments if you want to change the value.

   You can independently select and deselect directories by clicking them.
   Note that selecting a directory will activate the whole subtree for
   searching, there is no need to select the subdirectories, and no way to
   exclude some of them (use [241]Query language dir: clauses if this is
   needed).

3.2.3. The result list

   After starting a search, a list of results will instantly be displayed
   in the main window.

   By default, the document list is presented in order of relevance (how
   well the application estimates that the document matches the query).
   You can sort the results by ascending or descending date by using the
   vertical arrows in the toolbar.

   Each result is displayed as a structured text paragraph. The standard
   format is typically adequate, but the content and presentation are
   [242]entirely customisable.

   Most results will contain Preview and Open clickable links.

   Clicking the Preview link will open an internal preview window for the
   document. Further Preview clicks for the same search will open tabs in
   the existing preview window. You can use Shift+Click to force the
   creation of another preview window, which may be useful to view the
   documents side by side. (You can also browse successive results in a
   single preview window by typing Shift+ArrowUp/Down in the window).

   Clicking the Open link will start an external viewer for the document.
   By default, Recoll lets the desktop choose the appropriate application
   for most document types. See [243]further for customising the
   applications.

   The Preview and Open links may not be present for all entries. They are
   only available, respectively, for documents with MIME types that Recoll
   can extract text from, and for documents that have a configured viewer.
   However, you can modify the configuration to adjust this behavior. In
   more detail:
     * The Preview link will appear for documents with a MIME type present
       in the [index] section of the [244]mimeconf file, and, only if the
       textunknownasplain configuration variable is set, for all types
       identified as a subtype of text (text/*).
     * The Open link will appear for documents with a MIME type present in
       the [view] section of the [245]mimeview configuration file. If
       textunknownasplain is set and no specific viewer is found for a
       subtype of text, the viewer for text/plain will be used.

   You can click on the Query details link at the top of the results page
   to see the actual Xapian query, after stem expansion and other
   processing.

   Double-clicking on any word inside the result list or a preview window
   will insert it into the simple search text.

   The result list is divided into pages. You can change the page size in
   the preferences. Use the arrow buttons in the toolbar or the links at
   the bottom of the page to browse the results.

No results: the spelling suggestions

   When a search yields no result, and if the aspell dictionary is
   configured, Recoll will try to check for misspellings among the query
   terms, and will propose lists of replacements. Clicking on one of the
   suggestions will replace the word and restart the search. You can hold
   any of the modifier keys (Ctrl, Shift, etc.) while clicking if you
   would rather stay on the suggestion screen because several terms need
   replacement.

The result list right-click menu

   Apart from the preview and edit links, you can display a pop-up menu by
   right-clicking over a paragraph in the result list. This menu has the
   following entries:
     * Preview
     * Open
     * Open With
     * Run Script
     * Copy File Name
     * Copy Url
     * Save to File
     * Find similar
     * Preview Parent document
     * Open Parent document
     * Open Snippets Window

   The Preview and Open entries do the same thing as the corresponding
   links.

   Open With (Unix-like systems) lets you open the document with one of
   the applications claiming to be able to handle its MIME type (the
   information comes from the .desktop files in /usr/share/applications).

   Run Script allows starting an arbitrary command on the result file. It
   will only appear for results which are top-level files. See
   [246]further for a more detailed description.

   The Copy File Name and Copy Url entries copy the relevant data to the
   clipboard, for later pasting.

   Save to File allows saving the contents of a result document to a
   chosen file. This entry will only appear if the document does not
   correspond to an existing file, but is a subdocument inside such a file
   (e.g.: an email attachment). It is especially useful to extract
   attachments with no associated editor.

   The Open/Preview Parent document entries allow working with the higher
   level document (e.g. the email message an attachment comes from).
   Recoll is sometimes not totally accurate as to what it can or can't do
   in this area. For example the Parent entry will also appear for an
   email which is part of an mbox folder file, but you can't actually
   visualize the mbox (there will be an error dialog if you try).

   If the document is a top-level file, Open Parent will start the default
   file manager on the enclosing filesystem directory.

   The Find similar entry will select a number of relevant terms from the
   current document and enter them into the simple search field. You can
   then start a simple search, with a good chance of finding documents
   related to the current result. I can't remember a single instance where
   this function was actually useful to me...

   The Open Snippets Window entry will only appear for documents which
   support page breaks (typically PDF, Postscript, DVI). The snippets
   window lists extracts from the document, taken around search terms
   occurrences, along with the corresponding page number, as links which
   can be used to start the native viewer on the appropriate page. If the
   viewer supports it, its search function will also be primed with one of
   the search terms.

3.2.4. The result table

   As an alternative to the result list, the results can also be displayed
   in spreadsheet-like fashion. You can switch to this presentation by
   clicking the table-like icon in the toolbar (this is a toggle, click
   again to restore the list).

   Clicking on the column headers will allow sorting by the values in the
   column. You can click again to invert the order, and use the header
   right-click menu to reset sorting to the default relevance order (you
   can also use the sort-by-date arrows to do this).

   Both the list and the table display the same underlying results. The
   sort order set from the table is still active if you switch back to the
   list mode. You can click twice on a date sort arrow to reset it from
   there.

   The header right-click menu allows adding or deleting columns. The
   columns can be resized, and their order can be changed (by dragging).
   All the changes are recorded when you quit recoll.

   Hovering over a table row will update the detail area at the bottom of
   the window with the corresponding values. You can click the row to
   freeze the display. The bottom area is equivalent to a result list
   paragraph, with links for starting a preview or a native application,
   and an equivalent right-click menu. Typing Esc (the Escape key) will
   unfreeze the display.

   Using Shift-click on a row will display the document extracted text
   (somewhat like a preview) instead of the document details. The
   functions of Click and Shift-Click can be reversed in the GUI
   preferences.

3.2.5. The preview window

   The preview window opens when you first click a Preview link inside the
   result list.

   Subsequent preview requests for a given search open new tabs in the
   existing window (except if you hold the Shift key while clicking which
   will open a new window for side by side viewing).

   Starting another search and requesting a preview will create a new
   preview window. The old one stays open until you close it.

   You can close a preview tab by typing Ctrl-W (Ctrl + W) in the window.
   Closing the last tab, or using the window manager button in the top of
   the frame will also close the window.

   You can display successive or previous documents from the result list
   inside a preview tab by typing Shift+Down or Shift+Up (Down and Up are
   the arrow keys).

   A right-click menu in the text area allows switching between displaying
   the main text or the contents of fields associated to the document
   (e.g.: author, abstract, etc.). This is especially useful in cases where
   the term match did not occur in the main text but in one of the fields.
   In the case of images, you can switch between three displays: the image
   itself, the image metadata as extracted by exiftool (used as main body
   text) and the fields.

   You can print the current preview window contents by typing Ctrl-P
   (Ctrl + P) in the window text.

Searching inside the preview

   The preview window has an internal search capability, mostly controlled
   by the panel at the bottom of the window, which works in two modes: as
   a classical editor incremental search, where we look for the text
   entered in the entry zone, or as a way to walk the matches between the
   document and the Recoll query that found it.

   Incremental text search
          The preview tabs have an internal incremental search function.
           You initiate the search either by typing a / (slash) or Ctrl-F
          inside the text area or by clicking into the Search for: text
          field and entering the search string. You can then use the Next
          and Previous buttons to find the next/previous occurrence. You
          can also type F3 inside the text area to get to the next
          occurrence.

          If you have a search string entered and you use
          Ctrl-Up/Ctrl-Down to browse the results, the search is initiated
          for each successive document. If the string is found, the cursor
          will be positioned at the first occurrence of the search string.

   Walking the match lists
          If the entry area is empty when you click the Next or Previous
          buttons, the editor will be scrolled to show the next match to
          any search term (the next highlighted zone). If you select a
          search group from the dropdown list and click Next or Previous,
          the match list for this group will be walked. This is not the
          same as a text search, because the occurrences will include
          non-exact matches (as caused by stemming or wildcards). The
          search will revert to the text mode as soon as you edit the
          entry area.

3.2.6. Assisted Complex Search (A.K.A. "Advanced Search")

   The advanced search dialog helps you build more complex queries without
   having to memorize the search language constructs.

   The dialog can be opened through the Tools menu or through the main
   toolbar. There is also an option to have it open when the program
   starts. The results of the search are processed and displayed in the
   same way as the results from the "simple search".

   Recoll keeps a history of searches. See [247]Advanced search history.

   The dialog has two tabs:
    1. The first tab lets you specify terms to search for, and permits
       specifying multiple clauses which are combined to build the search.
    2. The second tab allows filtering the results according to file size,
       date of modification, MIME type, or location.

   Click on the Start Search button in the advanced search dialog, or type
   Enter in any text field to start the search. The button in the main
   window always performs a simple search.

   Click on the Show query details link at the top of the result page to
   see the query expansion.

Advanced search: the "find" tab

   This part of the dialog lets you construct a query by combining
   multiple clauses of different types. Each entry field is configurable
   for the following modes:
     * All terms.
     * Any term.
     * None of the terms.
     * Phrase (exact terms in order within an adjustable window).
     * Proximity (terms in any order within an adjustable window).
     * Filename search.

   Additional entry fields can be created by clicking the Add clause
   button.

   When searching, the non-empty clauses will be combined either with an
   AND or an OR conjunction, depending on the choice made on the left (All
   clauses or Any clause).

   Entries of all types except "Phrase" and "Proximity" accept a mix of single
   words and phrases enclosed in double quotes. Stemming and wildcard
   expansion will be performed as for simple search.

Phrase and Proximity searches

   These two clauses look for a group of terms in specified relative
   positions. They differ in the sense that the order of input terms is
   significant for phrase searches, but not for proximity searches. The
   latter do not impose an order on the words. In both cases, an
   adjustable number (slack) of non-matched words may be accepted between
   the searched ones. For phrase searches, the default count is zero
   (exact match). For proximity searches it is ten (meaning that two
   search terms would be matched if found within a window of twelve
   words).

   Examples: a phrase search for quick fox with a slack of 0 will match
   quick fox but not quick brown fox. With a slack of 1 it will match the
   latter, but not fox quick. A proximity search for quick fox with the
   default slack will match the latter, and also a fox is a cunning and
   quick animal.

   The slack can be adjusted with the counter to the left of the input
   area.

Advanced search: the "filter" tab

   This part of the dialog has several sections which allow filtering the
   results of a search according to a number of criteria:
     * The first section allows filtering by dates of last modification.
       You can specify both a minimum and a maximum date. The initial
       values are set according to the oldest and newest documents found
       in the index.
     * The next section allows filtering the results by file size. There
       are two entries for minimum and maximum size. Enter decimal
       numbers. You can use suffix multipliers: k/K, m/M, g/G, t/T for
       10^3, 10^6, 10^9, 10^12 respectively.
     * The next section allows filtering the results by their MIME types,
       or MIME categories (e.g.: media/text/message/etc.).
       You can transfer the types between two boxes, to define which will
       be included or excluded by the search.
       The state of the file type selection can be saved as the default
       (the file type filter will not be activated at program start-up,
       but the lists will be in the restored state).
     * The bottom section allows restricting the search results to a
       sub-tree of the indexed area. You can use the Invert checkbox to
       search for files not in the sub-tree instead. If you use directory
       filtering often and on big subsets of the file system, you may
       think of setting up multiple indexes instead, as the performance
       may be better.
       You can use relative/partial paths for filtering. E.g., entering
       dirA/dirB would match either /dir1/dirA/dirB/myfile1 or
       /dir2/dirA/dirB/someother/myfile2.

Advanced search history

   The advanced search tool memorizes the last 100 searches performed. You
   can walk the saved searches by using the up and down arrow keys while
   the keyboard focus belongs to the advanced search dialog.

   The complex search history can be erased, along with the one for simple
   search, by selecting the File → Erase Search History menu entry.

3.2.7. Document history

   Documents that you actually view (with the internal preview or an
   external tool) are entered into the document history, which is
   remembered.

   You can display the history list by using the Tools/Doc History menu
   entry.

   You can erase the document history by using the Erase document history
   entry in the File menu.

3.2.8. Saving and restoring queries

   Both simple and advanced query dialogs save recent history, but the
   amount is limited: old queries will eventually be forgotten. Also,
   important queries may be difficult to find among others. This is why
   both types of queries can also be explicitly saved to files, from the
   GUI menus: File → Save last query / Load last query

   The default location for saved queries is a subdirectory of the current
   configuration directory, but saved queries are ordinary files and can
   be written or moved anywhere.

   Some of the saved query parameters are part of the preferences (e.g.
   autophrase or the active external indexes), and may differ when the
   query is loaded from the time it was saved. In this case, Recoll will
   warn of the differences, but will not change the user preferences.

3.2.9. Sorting search results and collapsing duplicates

   The documents in a result list are normally sorted in order of
   relevance. It is possible to specify a different sort order, either by
   using the vertical arrows in the GUI toolbox to sort by date, or
   switching to the result table display and clicking on any header. The
   sort order chosen inside the result table remains active if you switch
   back to the result list, until you click one of the vertical arrows
   enough times that both are unchecked (you are then back to sorting by
   relevance).

   Sort parameters are remembered between program invocations, but result
   sorting is normally always inactive when the program starts. It is
   possible to keep the sorting activation state between program
   invocations by checking the Remember sort activation state option in
   the preferences.

   It is also possible to hide duplicate entries inside the result list
   (documents with the exact same contents as the displayed one). The test
   of identity is based on an MD5 hash of the document container, not only
   of the text contents (so that e.g., a text document with an image added
   will not be a duplicate of the text only). Duplicates hiding is
   controlled by an entry in the GUI configuration dialog, and is off by
   default.

   When a result document does have undisplayed duplicates, a Dups link
   will be shown with the result list entry. Clicking the link will
   display the paths (URLs + ipaths) for the duplicate entries.

3.2.10. The term explorer tool

   Recoll automatically manages the expansion of search terms to their
   derivatives (e.g.: plural/singular, verb inflections). But there are
   other cases where the exact search term is not known. For example, you
   may not remember the exact spelling, or only know the beginning of the
   name.

   The search will only propose replacement terms with spelling variations
   when no matching documents were found. In some cases, both proper
   spellings and misspellings are present in the index, and it may be
   interesting to look for them explicitly.

   The term explorer tool (started from the toolbar icon or from the Term
   explorer entry of the Tools menu) can be used to search the full index
   terms list, or (a later addition) display some statistics or other index
   information. It has several modes of operation:

   Wildcard
          In this mode of operation, you can enter a search string with
          shell-like wildcards (*, ?, []). e.g.: xapi* would display all
          index terms beginning with xapi. (More about wildcards
          [248]here).

   Regular expression
          This mode will accept a regular expression as input. Example:
          word[0-9]+. The expression is implicitly anchored at the
          beginning. E.g.: press will match pression but not expression.
          You can use .*press to match the latter, but be aware that this
          will cause a full index term list scan, which can be quite long.

   Stem expansion
          This mode will perform the usual stem expansion normally done as
           part of user input processing. As such it is probably mostly useful
          to demonstrate the process.

   Spelling/Phonetic
          In this mode, you enter the term as you think it is spelled, and
          Recoll will do its best to find index terms that sound like your
          entry. This mode uses the Aspell spelling application, which
          must be installed on your system for things to work (if your
          documents contain non-ASCII characters, Recoll needs an aspell
          version newer than 0.60 for UTF-8 support). The language which
          is used to build the dictionary out of the index terms (which is
          done at the end of an indexing pass) is the one defined by your
          NLS environment. Weird things will probably happen if languages
          are mixed up.

   Show index statistics
           This will print a long list of boring numbers about the index.

   List files which could not be indexed
          This will show the files which caused errors, usually because
          recollindex could not translate their format into text.

   Note that in cases where Recoll does not know the beginning of the
   string to search for (e.g. a wildcard expression like *coll), the
   expansion can take quite a long time because the full index term list
   will have to be processed. The expansion is currently limited to 10000
   results for wildcards and regular expressions. It is possible to change
   the limit in the configuration file.

   Double-clicking on a term in the result list will insert it into the
   simple search entry field. You can also cut/paste between the result
   list and any entry field (the end of lines will be taken care of).

3.2.11. The Query Fragments window

   The Query Fragments window can be used to control filtering query
   language elements modifying the current query, simply by clicking a
   button. This can be useful to save typing, or avoid memorizing, simple
   clauses of common usage (e.g. selecting only standalone documents or
   attachments, or filtering out Web results, selecting a file system
   subtree, a file type, etc.).

   Selecting the Tools → Query Fragments menu entry will open the dialog.

   The contents of the window are entirely customisable, and defined by
   the contents of a XML text file, named fragment-buttons.xml and which
   will be looked for in the current index configuration directory. The
   sample file distributed with Recoll contains a number of example
   filters. This will be automatically copied to the configuration
   directory if the file does not exist in there (e.g.
   ~/.recoll/fragment-buttons.xml under Linux and MacOS,
   $HOME/AppData/Local/Recoll/fragment-buttons.xml for Windows). Editing
   the copy will allow you to configure the tool for your needs.

Note

   The fragment-buttons.xml file was named fragbuts.xml up to Recoll
   version 1.31.0. This was deemed too close to an offensive word for
   native English speakers, so the file was renamed. An existing
   fragbuts.xml will still be used if fragment-buttons.xml does not exist.
   No automatic renaming will be performed.

   Here follows an example window:
   [frag-sample.png]

   And the corresponding configuration file:
<?xml version="1.0" encoding="UTF-8"?>
<fragbuttons version="1.0">

  <radiobuttons>
    <!-- Toggle Web queue results inclusion -->
    <fragbutton>
      <label>Include Web Results</label>
      <frag></frag>
    </fragbutton>
    <fragbutton>
      <label>Exclude Web Results</label>
      <frag>-rclbes:BGL</frag>
    </fragbutton>
    <fragbutton>
      <label>Only Web Results</label>
      <frag>rclbes:BGL</frag>
    </fragbutton>
  </radiobuttons>

  <radiobuttons>
    <!-- Standalone vs embedded switch -->
    <fragbutton>
      <label>Include embedded documents</label>
      <frag></frag>
    </fragbutton>
    <fragbutton>
      <label>Only standalone documents</label>
      <frag>issub:0</frag>
    </fragbutton>
    <fragbutton>
      <label>Only embedded documents</label>
      <frag>issub:1</frag>
    </fragbutton>
  </radiobuttons>

  <buttons>
    <fragbutton>
      <label>Example: Year 2010</label>
      <frag>date:2010-01-01/2010-12-31</frag>
    </fragbutton>
    <fragbutton>
      <label>Example: c++ files</label>
      <frag>ext:cpp OR ext:cxx</frag>
    </fragbutton>
    <fragbutton>
      <label>Example: My Great Directory</label>
      <frag>dir:/my/great/directory</frag>
    </fragbutton>
  </buttons>
</fragbuttons>

   There are two types of groupings, radiobuttons and buttons, each
   defining a line of checkbuttons or radiobuttons inside the window. Any
   number of buttons can be selected, but the radiobuttons in a line are
   exclusive.

   Buttons are defined by a fragbutton section, which provides the label
   for a button, and the Query Language fragment which will be added (as
   an AND filter) before performing the query if the button is active.
    <fragbutton>
      <label>Example: My Great Directory</label>
      <frag>dir:/my/great/directory</frag>
    </fragbutton>

   It is also possible to add message elements inside the groups, for
   documenting the behaviour. message elements have a label but no frag
   element. Example:
  <buttons>
    <message>
      <label>This is a message</label>
    </message>
  </buttons>

   The label contents are interpreted as HTML. Take care to replace
   opening < characters with the &lt; entity if you use tags.

   The only thing that you need to know about XML for editing this file is
   that any opening tag like <label> needs to be matched by a closing tag
   after the value: </label>.

   You will normally edit the file with a regular text editor, like, e.g.
   vi or notepad. Double-clicking the file in a file manager may not work,
   because this usually opens it in a Web browser, which will not let you
   modify the contents.

3.2.12. Searching across multiple indexes

   See the section describing [249]the use of multiple indexes for
   generalities. Only the aspects concerning the recoll GUI are described
   here.

   A recoll program instance is always associated with a main index, which
   is the one to be updated when requested from the File menu, but it can
   use any number of external Recoll indexes for searching. The external
   indexes can be selected through the External Indexes tab in the
   preferences dialog, which can be reached either through: Preferences →
   GUI Configuration → External Index Dialog or Query → External index
   dialog.

   Index selection is performed in two phases. A set of all usable indexes
   must first be defined, and then the subset of indexes to be used for
   searching. These parameters are retained across program executions
   (they are kept separately for each Recoll configuration). The set of
   all indexes is usually quite stable, while the active ones might
   typically be adjusted quite frequently.

   The main index (defined by RECOLL_CONFDIR) is always active. If this is
   undesirable, you can set up your base configuration to index an empty
   directory.

   When adding a new index to the set, you can select either a Recoll
   configuration directory, or directly a Xapian index directory. In the
   first case, the Xapian index directory will be obtained from the
   selected configuration.

   If the external index is actually located on a volume mounted from
   another machine, and references remote files, there may be a need to
   adjust the result paths so that they match the locally mounted ones
   (for opening documents). This can be done by using the [250]path
   translation facility.

   As building the set of all indexes can be a little tedious when done
   through the user interface, you can use the RECOLL_EXTRA_DBS
   environment variable to provide an initial set. This might typically be
   set up by a system administrator so that every user does not have to do
   it. The variable should define a colon-separated list of index
   directories, e.g.:
export RECOLL_EXTRA_DBS=/some/place/xapiandb:/some/other/db

   On Windows, use semi-colons (;) as separators instead of colons.

   Another environment variable, RECOLL_ACTIVE_EXTRA_DBS allows adding to
   the active list of indexes. This variable was suggested and implemented
   by a Recoll user. It is mostly useful if you use scripts to mount
   external volumes with Recoll indexes. By using RECOLL_EXTRA_DBS and
   RECOLL_ACTIVE_EXTRA_DBS, you can add and activate the index for the
   mounted volume when starting recoll. Unreachable indexes will
   automatically be deactivated when starting up.
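
   For example, a mount script could add and activate the index of a
   just-mounted volume with something like the following (the path is
   hypothetical):
export RECOLL_EXTRA_DBS=/media/me/usbdrive/recoll-confdir/xapiandb
export RECOLL_ACTIVE_EXTRA_DBS=/media/me/usbdrive/recoll-confdir/xapiandb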

3.2.13. Unix-like systems: displaying thumbnails

   The default format for the result list entries and the detail area of
   the result table display an icon for each result document. The icon is
   either a generic one determined from the MIME type, or a thumbnail of
   the document appearance. Thumbnails are only displayed if found in the
   standard freedesktop location, where they would typically have been
   created by a file manager.

   Recoll has no capability to create thumbnails. A relatively simple
   trick is to use the Open parent document/folder entry in the result
   list popup menu. This should open a file manager window on the
   containing directory, which should in turn create the thumbnails
   (depending on your settings). Restarting the search should then display
   the thumbnails.

   There are also [251]some pointers about thumbnail generation in the
   Recoll FAQ.

3.2.14. Unix-like systems: running arbitrary commands on result files

   Apart from the Open and Open With operations, which allow starting an
   application on a result document (or a temporary copy), based on its
   MIME type, it is also possible to run arbitrary commands on results
   which are top-level files, using the Run Script entry in the results
   pop-up menu.

   The commands which will appear in the Run Script submenu must be
   defined by .desktop files inside the scripts subdirectory of the
   current configuration directory.

   Here follows an example of a .desktop file, which could be named for
   example, ~/.recoll/scripts/myscript.desktop (the exact file name inside
   the directory is irrelevant):
[Desktop Entry]
Type=Application
Name=MyFirstScript
Exec=/home/me/bin/tryscript %F
MimeType=*/*

   The Name attribute defines the label which will appear inside the Run
   Script menu. The Exec attribute defines the program to be run, which
   does not need to actually be a script, of course. The MimeType
   attribute is not used, but needs to exist.

   The commands defined this way can also be used from links inside the
   [252]result paragraph.

   As an example, it might make sense to write a script which would move
   the document to the trash and purge it from the Recoll index.
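
   Here is a minimal sketch of such a script (assuming that the gio
   command is available for trashing; the recollindex -e option purges
   individual files from the index). It would be referenced from the Exec
   line of a .desktop file like the one above:
#!/bin/sh
# Move each selected result file to the trash, then purge it from the index.
for f in "$@"; do
    gio trash "$f" && recollindex -e "$f"
done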

3.2.15. Keyboard shortcuts

   A number of common actions within the graphical interface can be
   triggered through keyboard shortcuts. As of Recoll 1.29, many of the
   shortcut values can be customised from a screen in the GUI preferences.
   Most shortcuts are specific to a given context (e.g. within a preview
   window, within the result table).

   Most shortcuts can be changed to a preferred value by using the GUI
   shortcut editor: Preferences → GUI configuration → Shortcuts. In order
   to change a shortcut, just click the corresponding cell in the Shortcut
   column, and type the desired sequence.

   Table 3.1. Keyboard shortcuts
   Description Default value
   Context: almost everywhere
   Program exit Ctrl+Q
   Context: advanced search
   Load the next entry from the search history Up
   Load the previous entry from the search history Down
   Context: main window
   Clear search. This will move the keyboard cursor to the simple search
   entry and erase the current text Ctrl+S
   Move the keyboard cursor to the search entry area without erasing the
   current text Ctrl+L
   Move the keyboard cursor to the search entry area without erasing the
   current text Ctrl+Shift+S
   Toggle displaying the current results as a table or as a list Ctrl+T
   Context: main window, when showing the results as a table
   Move the keyboard cursor to the currently selected row in the table, or
   to the first one if none is selected Ctrl+R
   Jump to row 0-9 or a-z in the table Ctrl+[0-9] or Ctrl+Shift+[a-z]
   Cancel the current selection Esc
   Context: preview window
   Close the preview window Esc
   Close the current tab Ctrl+W
   Open a print dialog for the current tab contents Ctrl+P
   Load the next result from the list to the current tab Shift+Down
   Load the previous result from the list to the current tab Shift+Up
   Context: result table
   Copy the text contained in the selected document to the clipboard
   Ctrl+G
   Copy the text contained in the selected document to the clipboard, then
   exit recoll Ctrl+Alt+Shift+G
   Open the current document Ctrl+O
   Open the current document and exit Recoll Ctrl+Alt+Shift+O
   Show a full preview for the current document Ctrl+D
   Toggle showing the column names Ctrl+H
   Show a snippets (keyword in context) list for the current document
   Ctrl+E
   Toggle showing the row letters/numbers Ctrl+V
   Context: snippets window
   Close the snippets window Esc
   Find in the snippets list (method #1) Ctrl+F
   Find in the snippets list (method #2) /
   Find the next instance of the search term F3
   Find the previous instance of the search term Shift+F3

3.2.16. Search tips

Terms and search expansion

   Term completion.  While typing into the simple search entry, a popup
   menu will appear and show completions for the current string. Values
   preceded by a clock icon come from the history, those preceded by a
   magnifier icon come from the index terms. This can be disabled in the
   preferences.

   Picking up new terms from result or preview text.  Double-clicking on a
   word in the result list or in a preview window will copy it to the
   simple search entry field.

   Wildcards.  Wildcards can be used inside search terms in all forms of
   searches. [253]More about wildcards.

   Automatic suffixes.  Words like odt or ods can be automatically turned
   into query language ext:xxx clauses. This can be enabled in the Search
   preferences panel in the GUI.

   Disabling stem expansion.  Entering a capitalized word in any search
   field will prevent stem expansion (no search for gardening if you enter
   Garden instead of garden). This is the only case where character case
   should make a difference for a Recoll search. You can also disable stem
   expansion or change the stemming language in the preferences.

   Finding related documents.  Selecting the Find similar documents entry
   in the result list paragraph right-click menu will select a set of
   "interesting" terms from the current result, and insert them into the
   simple search entry field. You can then possibly edit the list and
   start a search to find documents which may be related to the current
   result.

   File names.  File names are added as terms during indexing, and you can
   specify them as ordinary terms in normal search fields (Recoll used to
   index all directories in the file path as terms. This has been
   abandoned as it did not seem really useful). Alternatively, you can use
   the specific file name search which will only look for file names, and
   may be faster than the generic search especially when using wildcards.

Working with phrases and proximity

   Phrases searches.  A phrase can be looked for by enclosing a number of
   terms in double quotes. Example: "user manual" will look only for
   occurrences of user immediately followed by manual. You can use the
   "Phrase" field of the advanced search dialog to the same effect.
   Phrases can be entered along simple terms in all simple or advanced
   search entry fields, except "Phrase".

   Proximity searches.  A proximity search differs from a phrase search in
   that it does not impose an order on the terms. Proximity searches can
   be entered by specifying the "Proximity" type in the advanced search,
   or by postfixing a phrase search with a 'p'. Example: "user manual"p
   would also match "manual user". Also see [254]the modifier section from
   the query language documentation.

   AutoPhrases.  This option can be set in the preferences dialog. If it
   is set, a phrase will be automatically built and added to simple
   searches in All terms and Query language modes. This will not change
   radically the results, but will give a relevance boost to the results
   where the search terms appear as a phrase. E.g.: searching for virtual
   reality will still find all documents where either virtual and reality
   appear, but those which contain virtual reality should appear sooner in
   the list.

   Phrase searches can slow down a query if most of the terms in the
   phrase are common. If the autophrase option is on, very common terms
   will be removed from the automatically constructed phrase. The removal
   threshold can be adjusted from the search preferences. This has become
   much less of a concern with recent Xapian versions and the autophrase
   option is set by default.

   Phrases and abbreviations.  Dotted abbreviations like I.B.M. are also
   automatically indexed as a word without the dots: IBM. Searching for
   the word inside a phrase (e.g.: "the IBM company") will only match the
   dotted abbreviation if you increase the phrase slack (using the
   advanced search panel control, or the o query language modifier).
   Literal occurrences of the word will be matched normally.
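
   For example, in the query language, increasing the phrase slack with
   the o modifier would look like the following (the exact slack value
   needed may vary):
"the IBM company"o1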

Others

   Using fields.  You can use the [255]query language and field
   specifications to only search certain parts of documents. This can be
   especially helpful with email, for example only searching emails from a
   specific originator:
search tips from:helpfulgui

   Result table tips.  When displaying results in table mode, you can use
   a right click on the table headers to activate a pop-up menu which will
   let you adjust what columns are displayed. You can drag the column
   headers to adjust their order. You can click them to sort by the field
   displayed in the column. You can also save the result list in CSV
   format.

   Changing the GUI geometry.  It is possible to configure the GUI in wide
   form factor by dragging the toolbars to one of the sides (their
   location is remembered between sessions), and moving the category
   filters to a menu (can be set in the Preferences → GUI configuration →
   User interface panel).

   Query explanation.  You can get an exact description of what the query
   looked for, including stem expansion, and Boolean operators used, by
   clicking on the result list header.

   Advanced search history.  You can display any of the last 100 complex
   searches performed by using the up and down arrow keys while the
   advanced search panel is active.

   Forced opening of a preview window.  You can use Shift+Click on a
   result list Preview link to force the creation of a preview window
   instead of a new tab in the existing one.

3.2.17. Customising the search interface

   You can customise some aspects of the search interface by using the GUI
   configuration entry in the Preferences menu.

   There are several tabs in the dialog, dealing with the interface
   itself, the parameters used for searching and returning results, and
   what indexes are searched.

   Most GUI settings are global and do not depend on the index in use. As
   of Recoll 1.42, it is possible to specify that some settings will
   depend on the index. At the moment, only the result table configuration
   can be saved in such a way.

Choosing the viewer applications

   By default Recoll lets the desktop choose what application should be
   used to open a given document, with exceptions.

   The details of this behaviour can be customised with the Preferences →
   GUI configuration → User interface → Choose editor applications dialog
   or by editing the [256]mimeview configuration file.

   When Use desktop preferences, at the top of the dialog, is checked, the
   desktop default is generally used, but there is a small default list of
   exceptions, for MIME types where the Recoll choice should override the
   desktop one. These are applications which are well integrated with
   Recoll, for example, on Linux, evince for viewing PDF and Postscript
   files because of its support for opening the document at a specific
   page and passing a search string as an argument. You can add or remove
   document types to the exceptions by using the dialog.

   If you prefer to completely customise the choice of applications, you
   can uncheck Use desktop preferences, in which case the Recoll
   predefined applications will be used, and can be changed for each
   document type. This is probably not the most convenient approach in
   most cases.

   In all cases, the applications choice dialog accepts multiple
   selections of MIME types in the top section, and lets you define how
   they are processed in the bottom one. In most cases, you will be using
   %f as a place holder to be replaced by the file name in the application
   command line.
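
   For example, a line in the [view] section of the mimeview file might
   look like the following (okular is just an illustration, any viewer
   accepting a file name argument can be used):
application/pdf = okular %f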

   You may also change the choice of applications by editing the
   [257]mimeview configuration file if you find this more convenient.

   Under Unix-like systems, each result list entry also has a right-click
   menu with an Open With entry. This lets you choose an application from
   the list of those which registered with the desktop for the document
   MIME type, on a case by case basis.

The GUI preferences dialog

   Many aspects of the Recoll GUI can be customised from dialogs reached
   through the Preferences → GUI configuration menu choice.

   Not all choices from the different panels will be described below. Many
   are self-evident or have sufficient tooltip text to provide
   explanations.

User interface

     * Choose editor application: this opens the dialog which allows you
       to select the application to be used to open each MIME type, which
       was described in detail in the previous section.
     * Single application: when checked, starting the Recoll GUI will
       activate an existing instance instead of creating a new one.
     * Start with simple search mode: this lets you choose the value of
       the simple search type on program startup. Either a fixed value
       (e.g. Query Language), or the value in use when the program last
       exited.
     * Maximum size of search history: limit how many searches are stored
       in the history. Set to -1 for no limit. The history can be cleared
       in the File menu.
     * Start with advanced search dialog open: If you use this dialog
       frequently, checking the entries will get it to open when the GUI
       starts.
     * Remember sort activation state: if set, the GUI will remember the
       sort tool state between invocations. It normally starts with
       sorting disabled.
     * Depth of side filter directory tree: decide how many levels should
       be shown in the directory filter panel.
     * Side filter dates format: allows changing how dates are displayed
       in the side filter. See the tooltip for details.
     * Document filter choice style: this will let you choose if the
       document categories are displayed as a list or a set of buttons, or
       a menu.
     * Show system tray icon: a GUI instance will appear as an icon in the
       system tray. You can then also check Close to tray instead of
       exiting and Generate desktop notifications, to have e.g. a popup
       signal the completion of indexing.
     * Disable Qt autocompletion in search entry: this will disable the
       completion popup. Il will only appear, and display the full
       history, either if you enter only white space in the search area,
       or if you click the clock button on the right of the area.
     * Highlight CSS style for query terms: Terms from the user query are
       highlighted in the result list and the preview window. The
       highligthing style can be chosen here, for example color: blue or
       color: red;background: yellow. Mostly any CSS style should work.
     * Display scale: this actually adjusts the font sizes everywhere
       inside the GUI and can be used on high-resolution displays if the
       default characters are too small.
     * Color scheme: choose how the GUI is displayed: mostly dark on white
       (Light) or white on dark (Dark). On Windows you can also select
       System to conform to the system setting. On macOS the option is not
       available; the system mode is always used.
     * Style sheet: The name of a Qt style sheet text file which is
       applied to the whole GUI on startup. The default value is empty,
       but there is a skeleton style sheet (recoll.qss) inside the
       /usr/share/recoll/examples directory. Using a style sheet, you can
       change most Recoll graphical parameters: colors, fonts, etc. See
       the sample file for a few simple examples, and the short
       illustration after this list.
       Be aware that parameters (e.g. the background color) set inside
       the Recoll GUI style sheet will override the global system
       preferences, with possibly strange side effects. For example, if
       the desktop preferences set a light foreground color over a dark
       background, but the Recoll style sheet only sets the background,
       and sets it to a light color, then text will appear light-on-light
       inside the Recoll GUI.
     * Interface language: the Recoll GUI messages are normally shown in
       the language determined by the system locale (if the translation is
       available). This choice allows forcing the interface language, e.g.
       to English if the default translation is incomplete or of poor
       quality.
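
   As an illustration for the Style sheet setting above, a minimal Qt
   style sheet could contain only generic selectors, for example as
   follows (this is only a sketch; the recoll.qss sample file shows more
   targeted rules):
/* Set a default font size and colors for all widgets */
QWidget { font-size: 11pt; color: #202020; background-color: #fafafa; }
/* Make tooltips easier to read */
QToolTip { color: black; background-color: #ffffe0; }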

Result list:

     * Result list font: There is quite a lot of information shown in the
       result list, and you may want to customise the font and/or font
       size. The rest of the fonts used by Recoll are determined by your
       generic Qt configuration (try the qtconfig command).
     * Edit result list paragraph format string: allows you to change the
       presentation of each result list entry. See the [258]result list
       customisation section.
     * Edit result page HTML header insert: allows you to define text
       inserted at the end of the result page HTML header. More detail in
       the [259]result list customisation section.
     * Date format: allows specifying the format used for displaying dates
       inside the result list. This should be specified as an strftime()
       string (man strftime); see the short example after this list.
     * Abstract snippet separator: for synthetic abstracts built from
       index data, which are usually made of several snippets from
       different parts of the document, this defines the snippet
       separator, an ellipsis by default.
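
   As an example for the Date format setting above, the following
   strftime() string:
%Y-%m-%d %H:%M
   would display dates like 2014-06-17 09:30.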

Preview

     * Maximum text size highlighted for preview: Disable search term
       highlighting for texts bigger than the given size to speed up
       loading. Creating highlights on search terms involves quite a lot
       of processing, and can be slow.
     * Prefer HTML to plain text for preview: if set, Recoll will display
       HTML as such inside the preview window. If this causes display
       problems, you can uncheck it to display the plain text version
       instead. A common issue is insufficient contrast on a dark mode
       display, caused by the document style sheet.
     * Activate links in preview: if set, Recoll will turn HTTP links
       found inside plain text into proper HTML anchors, and clicking a
       link inside a preview window will start the default browser on the
       link target.
     * Fields display: max field length before using summary: this is used
       to limit the size of metadata text displayed on the fields view
       (reached through the right-click popup). Fields over this size will
       be truncated, with a clickable option to expand.
     * Plain text to HTML line style: when displaying plain text inside
       the preview window, Recoll tries to preserve some of the original
       text line breaks and indentation. It can either use PRE HTML tags,
       which preserve the indentation well but force horizontal scrolling
       for long lines, or use BR tags to break at the original line
       breaks, which lets the display widget introduce other line breaks
       according to the window width, but loses some of the original
       indentation. The third option is the default and probably the best
       one in most cases: use PRE tags with line wrapping.
     * Search term line offset: how many lines to display above a search
       hit, providing some context for the found search term.

Search parameters:

     * Hide duplicate results: decides if result list entries are shown
       for identical documents found in different places.
     * Stemming language: stemming obviously depends on the document's
       language. This listbox will let you choose among the stemming
       databases which were built during indexing (this is set in the
       [260]main configuration file), or later added with recollindex -s
       (See the recollindex manual). Stemming languages which are
       dynamically added will be deleted at the next indexing pass unless
       they are also added in the configuration file.
     * Automatically add phrase to simple searches: a phrase will be
       automatically built and added to simple searches when looking for
       Any terms. This will give a relevance boost to the results where
       the search terms appear as a phrase (consecutive and in order).
     * Autophrase term frequency threshold percentage: very frequent terms
       should not be included in automatic phrase searches for performance
       reasons. The parameter defines the cutoff percentage (percentage of
       the documents where the term appears).
     * Replace abstracts from documents: this decides if we should
       synthesize and display an abstract in place of an explicit abstract
       found within the document itself.
     * Dynamically build abstracts: this decides if Recoll tries to build
       document abstracts (lists of snippets) when displaying the result
       list. Abstracts are constructed by taking context from the document
       information, around the search terms.
     * Synthetic abstract size: adjust to taste.
     * Synthetic abstract context words: how many words should be
       displayed around each term occurrence.
     * Query language magic file name suffixes: a list of words which
       automatically get turned into ext:xxx file name suffix clauses when
       starting a query language query (e.g.: doc xls xlsx...). This will
       save some typing for people who use file types a lot when querying.

External indexes:

   This panel will let you browse for additional indexes that you may want
   to search. External indexes are designated by their database directory
   (e.g.: /home/someothergui/.recoll/xapiandb,
   /usr/local/recollglobal/xapiandb).

   Once entered, the indexes will appear in the External indexes list, and
   you can choose which ones you want to use at any moment by checking or
   unchecking their entries.

   Your main database (the one the current configuration indexes to) is
   always implicitly active. If this is not desirable, you can set up your
   configuration so that it indexes, for example, an empty directory. An
   alternative indexer may also need to implement a way of purging stale
   data from the index.

The result list format

   Recoll normally uses a full-featured HTML processor to display the
   result list and the [261]snippets window. Depending on the version,
   this may be based on either Qt WebKit or Qt WebEngine. It is then
   possible to completely customise the result list with full support for
   CSS and Javascript.

   It is also possible to build Recoll to use a simpler Qt QTextBrowser
   widget to display the HTML, which may be necessary if the engines above
   are not available on the system, or to reduce the application size and
   dependencies. There are limits to what you can do in this case, but it
   is still possible to decide what data each result will contain, and how
   it will be displayed.

   The result list presentation can be customised by adjusting two
   elements:
     * The paragraph format
     * HTML code inside the header section. This is also used for the
       [262]snippets window.

   The paragraph format and the header fragment can be edited from the
   Result list tab of the GUI configuration.

   The header fragment is used both for the result list and the snippets
   window. The snippets list is a table and has a snippets class
   attribute. Each paragraph in the result list is a table, with class
   respar, but this can be changed by editing the paragraph format.
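
   For example, the following fragment, added to the header insert, would
   style the result paragraphs and the snippets table using the class
   names just mentioned (an illustration only: the exact visual effect
   depends on the rest of the page markup and on the HTML engine in use):
<style type="text/css">
table.respar { margin-bottom: 0.5em; }
table.snippets td { padding: 2px 6px; }
</style>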

   There are a few examples on the [263]page about customising the result
   list on the Recoll Web site.

The paragraph format

   This is an arbitrary HTML string which will be transformed by
   printf-like % substitutions to show the results.

Note

   Any literal % character in the input must be quoted as %%. E.g. <table
   style="width: 100%;"> should be entered as <table style="width:
   100%%;">.

   The following substitutions will be performed:

   %A
          Abstract. If %s is not present, this will be either the document
          abstract attribute if one is present, or the synthetic snippets
          abstract. If %s is present, this will be the document abstract
          or empty.

   %D
          Date.

   %I
          Icon image name. This is normally determined from the MIME type.
          The associations are defined inside the [264]mimeconf
          configuration file. If a thumbnail for the file is found at the
          standard Freedesktop location, this will be displayed instead.

   %K
          Keywords.

   %L
          Precooked Preview, Edit, and possibly Snippets links.

   %M
          MIME type.

   %N
          result Number inside the result page.

   %P
          Parent folder Url. In the case of an embedded document, this is
          the parent folder for the top level container file.

   %R
          Relevance percentage.

   %S
          Size information.

   %s
          Synthetic "snippets" abstract (selected text around search terms
          found in the document).

   %T
          Title if this is set, else Filename.

   %t
          Title or empty.

   %(filename)
          File name.

   %U
          Url

   In addition to the predefined values above, all strings like
   %(fieldname) will be replaced by the value of the field named fieldname
   for this document. Only stored fields can be accessed in this way, the
   value of indexed but not stored fields is not known at this point in
   the search process (see [265]field configuration). There are currently
   very few fields stored by default, apart from the values above (only
   author and filename), so this feature will need some custom local
   configuration to be useful. An example candidate would be the recipient
   field which is generated by the message input handlers.

   The format of the Preview, Edit, and Snippets links is <a href="P%N">,
   <a href="E%N"> and <a href="A%N">, where %N expands to the document
   number inside the result page.

   A link target defined as "F%N" will open the document corresponding to
   the %P parent folder expansion, usually creating a file manager window
   on the folder where the container file resides. E.g.:
<a href="F%N">%P</a>

   A link target defined as R%N|scriptname will run the corresponding
   script on the result file (if the document is embedded, the script will
   be started on the top-level parent). See the [266]section about
   defining scripts. Note that the scriptname value should be the value
   of the Name field of the desktop file, and not the desktop file name.
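
   For example, assuming that a user script with a desktop file Name value
   of MyScript has been defined as described in that section, the
   following link would run it on the result document:
<a href="R%N|MyScript">Run MyScript</a>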

   The default value for the paragraph format string is:
"<table class=\"respar\">\n"
"<tr>\n"
"<td><a href='%U'><img src='%I' width='64'></a></td>\n"
"<td>%L &nbsp;<i>%S</i> &nbsp;&nbsp;<b>%T</b><br>\n"
"<span style='white-space:nowrap'><i>%M</i>&nbsp;%D</span>&nbsp;&nbsp;&nbsp; <i>
%U</i>&nbsp;%i<br>\n"
"%s %A %K</td>\n"
"</tr></table>\n"

   You may, for example, try the following for a more web-like experience:
<u><b><a href="P%N">%T</a></b></u><br>
%A<font color=#008000>%U - %S</font> - %L

   Note that the P%N link in the above paragraph makes the title a preview
   link. Or try the following clean-looking format:
<img src="%I" align="left">%L <font color="#900000">%R</font>
&nbsp;&nbsp;<b>%T&</b><br>%S&nbsp;
<font color="#808080"><i>%U</i></font>
<table bgcolor="#e0e0e0">
<tr><td><div>%A</div></td></tr>
</table>%K

   These samples, and some others, are [267]on the web site, with pictures
   to show how they look.

   It is also possible to [268]define the value of the snippet separator
   inside the abstract section.

3.2.18. The recoll GUI command line options

   The recoll command has a number of useful command line options.

   -c configdir specifies a non-default configuration directory.

   -L lang selects a different language for the GUI labels than the one
   which would be chosen according to the system locale. Some translations
   are quite incomplete and you may prefer to see the English messages,
   even if your machine is generally set up for, e.g., Spanish.
   Example:
recoll -L en

   -q query specifies a query to be run when the program starts. It takes
   a single argument, which must be quoted if it contains white space.

   -o/-l/-f/-a specify the type of query. The default is to interpret the
   -q argument as a query language string (which -l selects explicitly).
   The other options interpret the argument as an Any Term (-o), File Name
   (-f) or All Terms (-a) query instead.
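
   For example, the following would start the GUI and run an All Terms
   search for the two words (the query string is just an illustration):
recoll -a -q 'mountain bike'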

   The -t option will tell the program to behave exactly like the recollq
   command, printing the results to the standard output (terminal) instead
   of starting a graphical window.
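
   For example, the following should print the results of a query language
   search to the terminal, just as recollq would (the query string is only
   an illustration):
recoll -t 'mime:application/pdf dockes'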

   The -w option starts the program minimized. -W only creates a system
   tray icon (the system tray support must be enabled in the GUI
   preferences User interface tab).

3.3. Searching with the KDE KIO slave

   The Recoll KIO slave allows performing a Recoll search by entering an
   appropriate URL in a KDE open dialog, or a Dolphin URL. The results are
   displayed as directory entries.

   The instructions for building this module are located in the source
   tree. See: kde/kio/recoll/00README.txt. Some Linux distributions do
   package the kio-recoll module, so check before diving into the build
   process: it may already be there, ready for one-click installation.

3.4. Searching on the command line

   There are several ways to obtain search results as a text stream,
   without a graphical interface:
     * By passing option -t to the recoll program, or by calling it as
       recollq (through a link).
     * By using the actual recollq program.
     * By writing a custom Python program, using the [269]Recoll Python
       API.

   The first two methods work in the same way and accept/need the same
   arguments (except for the additional -t to recoll). The query to be
   executed is specified as command line arguments.

   Depending on the platform, recollq is not always built or installed by
   default (as recoll -t works the same). This is a very simple program,
   and if you can program a little C++, you may find it useful to tailor
   its output format to your needs. Apart from being easily customised,
   recollq is only really useful on systems where the Qt libraries are not
   available.

   recollq has a [270]man page. The Usage string follows:
Usage: recollq [options] [query elements]
 Runs a recoll query and displays result lines.
   By default, the argument(s) will be interpreted as a Recoll query language
   string. The -q option was kept for compatibility with the GUI and is just
   ignored: the query *must* be specified in the non-option arguments.
  Query language elements:
   * Implicit AND, exclusion, field spec:  t1 -t2 title:t3
   * OR has priority: t1 OR t2 t3 OR t4 means (t1 OR t2) AND (t3 OR t4)
   * Phrase: "t1 t2" (needs additional quoting on cmd line)
 Other query modes :
  -o Emulate the GUI simple search in ANY TERM mode.
  -a Emulate the GUI simple search in ALL TERMS mode.
  -f Emulate the GUI simple search in filename mode.
 Query and results options:
  -c <configdir> : specify configuration directory, overriding $RECOLL_CONFDIR.
  -C : collapse duplicates.
  -d also dump file contents.
  -n [first-]<cnt> define the result slice. The default value for [first] is 0.
     Without the option, the default max count is 2000. Use n=0 for no limit.
  -b : basic. Just output urls, no mime types or titles.
  -Q : no result lines, just the processed query and result count.
  -m : dump the whole document meta[] array for each result.
  -A : output the document abstracts.
     -p <cnt> : show <cnt> snippets, with page numbers instead of the
         concatenated abstract.
     -g <cnt> : show <cnt> snippets, with line numbers instead of the
         concatenated abstract.
  -S fld : sort by field <fld>.
    -D : sort descending.
  -s stemlang : set stemming language to use (must exist in index...).
     Use -s "" to turn off stem expansion.
  -T <synonyms file>: use the parameter (Thesaurus) for word expansion.
  -i <dbdir> : additional index, several can be given.
  -e use url encoding (%xx) for urls.
  -E use exact result count instead of lower bound estimate.
  -F <field name list> : output exactly these fields for each result.
     The field values are encoded in base64, output in one line and
     separated by one space character. This is the recommended format
     for use by other programs. Use a normal query with option -m to
     see the field names. Use -F '' to output all fields, but you probably
     also want option -N in this case.
    -N : with -F, print the (plain text) field names before the field values.
  --extract_to <filepath> : extract the first result to filepath, which must not
     exist. Use a -n option with an offset to select the appropriate result.
  --paths-only: only print results which would have a file:// scheme, and
     exclude the scheme.
 Other non-query usages:
  -P: Show the date span for all the documents present in the index.

   Sample execution:
recollq 'ilur -nautique mime:text/html'
Recoll query: ((((ilur:(wqf=11) OR ilurs) AND_NOT (nautique:(wqf=11) OR nautique
s OR nautiqu OR nautiquement)) FILTER Ttext/html))
4 results
text/html       [file:///Users/dockes/projets/bateaux/ilur/comptes.html]      [c
omptes.html]  18593   bytes
text/html       [file:///Users/dockes/projets/nautique/webnautique/articles/ilur
1/index.html] [Constructio...
text/html       [file:///Users/dockes/projets/pagepers/index.html]    [psxtcl/wr
itemime/recoll]...
text/html       [file:///Users/dockes/projets/bateaux/ilur/factEtCie/recu-chasse
-maree....
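
   The -F output (base64-encoded field values, one line per result) is the
   recommended way to use recollq from another program. The following
   Python sketch shows one way to decode it; the field names and the query
   string are only examples (run a normal query with option -m to see
   which field names exist for your documents):
import base64
import subprocess

fields = ["url", "title"]
query = "some query terms"

out = subprocess.run(["recollq", "-F", " ".join(fields), query],
                     capture_output=True, text=True, check=True).stdout

for line in out.splitlines():
    values = line.split()
    # recollq also prints a query description and a result count line;
    # anything which does not have exactly one value per requested field
    # (including results with an empty field value) is skipped here.
    if len(values) != len(fields):
        continue
    decoded = [base64.b64decode(v).decode("utf-8", "replace") for v in values]
    print(dict(zip(fields, decoded)))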

3.5. The query language

   The Recoll query language was based on the now defunct [271]Xesam user
   search language specification. It allows defining general boolean
   searches within the main body text or specific fields, and has many
   additional features, broadly equivalent to those provided by the
   complex search interface in the GUI.

   The query language processor is activated in the GUI simple search
   entry when the search mode selector is set to Query Language. It can
   also be used from the command line search, the KIO slave, or the Web
   UI.

   If the results of a query language search puzzle you and you doubt what
   has been actually searched for, you can use the GUI Show Query link at
   the top of the result list to check the exact query which was finally
   executed by Xapian.

3.5.1. General syntax

   Here follows a sample request that we are going to explain:
        author:"john doe" Beatles OR Lennon Live OR Unplugged -potatoes

   This would search for all documents with John Doe appearing as a phrase
   in the author field (exactly what this is would depend on the document
   type, e.g.: the From: header, for an email message), and containing
   either beatles or lennon and either live or unplugged but not potatoes
   (in any part of the document).

   An element is composed of an optional field specification, and a value,
   separated by a colon (the field separator is the last colon in the
   element). Examples:
     * Eugenie
     * author:balzac
     * dc:title:grandet
     * dc:title:"eugenie grandet"

   The colon, if present, means "contains". Xesam defines other relations,
   which are mostly unsupported for now (except in special cases,
   described further down).

   All elements in the search entry are normally combined with an implicit
   AND. It is possible to specify that elements be OR'ed instead, as in
   Beatles OR Lennon. The OR must be entered literally (capitals), and it
   has priority over the AND associations: word1 word2 OR word3 means
   word1 AND (word2 OR word3) not (word1 AND word2) OR word3.

   You can use parentheses to group elements, which will sometimes make
   things clearer, and may allow expressing combinations which would have
   been difficult otherwise.

   An element preceded by a - specifies a term that should not appear.

   By default, words inside double-quotes define a phrase search (the
   order of words is significant), so that title:"prejudice pride" is not
   the same as title:prejudice title:pride, and is unlikely to find a
   result. This can be changed by using [272]modifiers.

   Words inside phrases and capitalized words are not stem-expanded.
   Wildcards may be used anywhere inside a term. Specifying a wildcard on
   the left of a term can produce a very slow search (or even an incorrect
   one if the expansion is truncated because of excessive size). Also see
   [273]More about wildcards.

   To save you some typing, a field value given as a comma-separated list
   of terms will be interpreted as an AND list and a slash-separated list
   as an OR list. No white space is allowed. So:
author:john,lennon

   will search for documents with john AND lennon inside the author field
   (in any order), and
author:john/ringo

   would search for john OR ringo. This behaviour is only triggered by a
   field prefix: without it, comma- or slash- separated input will produce
   a phrase search. However, you can use a text field name to search the
   main text this way, as an alternative to using an explicit OR, e.g.
   text:napoleon/bonaparte would generate a search for napoleon OR
   bonaparte in the main text body.

   Modifiers can be set on a double-quote value, for example to specify a
   proximity search (unordered). See [274]the modifier section. No space
   must separate the final double-quote and the modifiers value, e.g. "two
   one"po10

   Recoll currently manages the following default fields:
     * title, subject or caption are synonyms which specify data to be
       searched for in the document title or subject.
     * author or from for searching the documents' originators.
     * recipient or to for searching the documents' recipients.
     * keyword for searching the document-specified keywords (few
       documents actually have any).
     * filename for the document's file name. You can use the shorter fn
       alias. This value is not set for all documents: internal documents
       contained inside a compound one (for example an EPUB section) do
       not inherit the container file name any more; this was replaced by
       an explicit field (see next). Sub-documents can still have a
       filename, if it is implied by the document format, for example the
       attachment file name for an email attachment.
     * containerfilename, aliased as cfn. This is set for all documents,
       both top-level and contained sub-documents, and is always the name
       of the filesystem file which contains the data. The terms from this
       field can only be matched by an explicit field specification (as
       opposed to terms from filename which are also indexed as general
       document content). This avoids getting matches for all the
       sub-documents when searching for the container file name.
     * ext specifies the file name extension (Ex: ext:html).
     * rclmd5 is the MD5 checksum for the document. This is used for
       displaying the duplicates of a search result (when querying with
       the option to collapse duplicate results). Incidentally, this could
       be used to find the duplicates of any given file by computing its
       MD5 checksum and executing a query with just the rclmd5 value.
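
   For example, the following command line sketch would list the indexed
   copies of somefile.pdf, assuming that the stored value is the plain
   hexadecimal digest of the file contents, as printed by md5sum (the file
   name is only an illustration):
recollq rclmd5:$(md5sum somefile.pdf | awk '{print $1}')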

   You can define aliases for field names, in order to use your preferred
   denomination or to save typing (e.g. the predefined fn and cfn aliases
   defined for filename and containerfilename). See the [275]section about
   the fields file.

   The document input handlers have the possibility to create other fields
   with arbitrary names, and aliases may be defined in the configuration,
   so that the exact field search possibilities may be different for you
   if someone took care of the customisation.

3.5.2. Special field-like specifiers

   The field syntax also supports a few field-like, but special, criteria,
   for which the values are interpreted differently. Regular processing
   does not apply (for example the slash- or comma- separated lists don't
   work). A list follows.
     * dir for filtering the results on file location. For example,
       dir:/home/me/somedir will restrict the search to results found
       anywhere under the /home/me/somedir directory (including
       subdirectories).
       Tilde expansion will be performed as usual. Wildcards will be
       expanded, but please [276]have a look at an important limitation of
       wildcards in path filters.
       You can also use relative paths. For example, dir:share/doc would
       match either /usr/share/doc or /usr/local/share/doc.
       -dir will find results not in the specified location.
       Several dir clauses can be specified, both positive and negative.
       For example the following makes sense:
dir:recoll dir:src -dir:utils -dir:common
       This would select results which have both recoll and src in the
       path (in any order), and which have not either utils or common.
       You can also use OR conjunctions with dir: clauses.
       On Unix-like systems, a special aspect of dir clauses is that the
       values in the index are not transcoded to UTF-8, and never
       lower-cased or unaccented, but stored as binary. This means that
       you need to enter the values in the exact lower or upper case, and
       that searches for names with diacritics may sometimes be impossible
       because of character set conversion issues. Non-ASCII UNIX file
       paths are an unending source of trouble and are best avoided.
       You need to use double-quotes around the path value if it contains
       space characters.
       The shortcut syntax to define OR or AND lists within fields with
       commas or slash characters is not available.
     * size for filtering the results on file size. Example: size<10000.
       You can use <, > or = as operators. You can specify a range like
       the following: size>100 size<1000. The usual k/K, m/M, g/G, t/T can
       be used as (decimal) multipliers. Ex: size>1k to search for files
       bigger than 1000 bytes.
     * date for searching or filtering on dates. The syntax for the
       argument is based on the ISO8601 standard for dates and time
       intervals. Only dates are supported, no times. The general syntax
       is 2 elements separated by a / character. Each element can be a
       date or a period of time. Periods are specified as PnYnMnD. The n
       numbers are the respective numbers of years, months or days, any of
       which may be missing. Dates are specified as YYYY-MM-DD. The days
       and months parts may be missing. If the / is present but an element
       is missing, the missing element is interpreted as the lowest or
       highest date in the index. Examples:
          + 2001-03-01/2002-05-01 the basic syntax for an interval of
            dates.
          + 2001-03-01/P1Y2M the same specified with a period.
          + 2001/ from the beginning of 2001 to the latest date in the
            index.
          + 2001 the whole year of 2001
          + P2D/ means 2 days ago up to now if there are no documents with
            dates in the future.
          + /2003 all documents from 2003 or older.
       Periods can also be specified with small letters (e.g.: p2y).
     * mime or format for specifying the MIME type. These clauses are
       processed apart from the normal Boolean logic of the search:
       multiple values will be OR'ed (instead of the normal AND). You can
       specify types to be excluded, with the usual -, and use wildcards.
       Example: mime:text/* -mime:text/plain. Specifying an explicit
       boolean operator before a mime specification is not supported and
       will produce strange results.
     * type or rclcat for specifying the category (as in
       text/media/presentation/etc.). The classification of MIME types in
       categories is defined in the Recoll configuration (mimeconf), and
       can be modified or extended. The default category names are those
       which permit filtering results in the main GUI screen. Categories
       are OR'ed like MIME types above, and can be negated with -.
     * issub for specifying that only standalone (issub:0) or only
       embedded (issub:1) documents should be returned as results.

Note

   mime, rclcat, size, issub and date criteria always affect the whole
   query (they are applied as a final filter), even if set with other
   terms inside a parenthesis.

Note

   mime (or the equivalent rclcat) is the only field with an OR default.
   You do need to use OR with ext terms for example.

3.5.3. Range clauses

   Recoll 1.24 and later support range clauses on fields which have been
   configured to support it. No default field uses them currently, so this
   paragraph is only interesting if you modified the fields configuration
   and possibly use a custom input handler.

   A range clause looks like one of the following:
myfield:small..big
myfield:small..
myfield:..big

   The nature of the clause is indicated by the two dots .., and the
   effect is to filter the results for which the myfield value is in the
   possibly open-ended interval.

   See the section about the [277]fields configuration file for the
   details of configuring a field for range searches (list them in the
   [values] section).

3.5.4. Modifiers

   Some characters are recognized as search modifiers when found
   immediately after the closing double quote of a phrase, as in "some
   term"modifierchars. The actual "phrase" can be a single term of course.
   Supported modifiers:
     * l can be used to turn off stemming (mostly makes sense with p
       because stemming is off by default for phrases, but see also x
       further down).
     * o can be used to specify a "slack" for both phrase and proximity
       searches: the number of additional terms that may be found between
       the specified ones. If o is followed by an integer number, this is
       the slack, else the default is 10. The default slack (with no o) is
       0 for phrase searches and 10 for proximity searches.
     * p can be used to turn an ordered phrase search into an unordered
       proximity one. Example: "order any in"p. You can find a little more
       detail about phrase and proximity searches [278]here.
     * s can be used to turn off synonym expansion, if a synonyms file is
       in place.
     * x (1.33.2) will enable the expansion of terms inside a phrase
       search (the default is for phrases to be searched verbatim). Also
       see the [279]stemexpandphrases in the configuration section, for
       changing the default behaviour.
     * A weight can be specified for a query element by specifying a
       decimal value at the start of the modifiers. Example:
       "Important"2.5.

   The following only make sense on indexes which are capable of case and
   diacritics sensitivity (not the default):
     * C will turn on case sensitivity.
     * D will turn on diacritics sensitivity (if the index supports it).
     * e (explicit) will turn on diacritics sensitivity and case
       sensitivity, and prevent stem expansion.

3.6. Wildcards and anchored searches

   Some special characters are interpreted by Recoll in search strings to
   expand or specialize the search. Wildcards expand a root term in
   controlled ways. Anchor characters can restrict a search to succeed
   only if the match is found at or near the beginning of the document or
   one of its fields.

3.6.1. Wildcards

   All words entered in Recoll search fields will be processed for
   wildcard expansion before the request is finally executed.

   The wildcard characters are:
     * * which matches 0 or more characters.
     * ? which matches a single character.
     * [] which allows defining sets of characters to be matched (ex: [abc]
       matches a single character which may be 'a', 'b' or 'c', and [0-9]
       matches any single digit).

   You should be aware of a few things when using wildcards.
     * Using a wildcard character at the beginning of a word can make for
       a slow search because Recoll will have to scan the whole index term
       list to find the matches. However, this is much less a problem for
       field searches, and queries like author:*@domain.com can sometimes
       be very useful.
     * Using a * at the end of a word can produce more matches than you
       would think, and strange search results. You can use the [280]term
       explorer tool to check what completions exist for a given term. You
       can also see exactly what search was performed by clicking on the
       link at the top of the result list. In general, for natural
       language terms, stem expansion will produce better results than an
       ending * (stem expansion is turned off when any wildcard character
       appears in the term).

Wildcards and path filtering

   Due to the way that Recoll processes wildcards inside dir path
   filtering clauses, they will have a multiplicative effect on the query
   size. A clause containing wildcards in several paths elements, like,
   for example, dir:/home/me/*/*/docdir, will almost certainly fail if
   your indexed tree is of any realistic size.

   Depending on the case, you may be able to work around the issue by
   specifying the paths elements more narrowly, with a constant prefix, or
   by using 2 separate dir: clauses instead of multiple wildcards, as in
   dir:/home/me dir:docdir. The latter query is not equivalent to the
   initial one because it does not specify a number of directory levels,
   but that's the best we can do (and it may be actually more useful in
   some cases).

3.6.2. Anchored searches

   Two characters are used to specify that a search hit should occur at
   the beginning or at the end of the text. ^ at the beginning of a term
   or phrase constrains the search to happen at the start; $ at the end
   forces it to happen at the end.

   As this function is implemented as a phrase search it is possible to
   specify a maximum distance at which the hit should occur, either
   through the controls of the advanced search panel, or using the query
   language, for example, as in:
"^someterm"o10

   which would force someterm to be found within 10 terms of the start of
   the text. This can be combined with a field search as in
   somefield:"^someterm"o10 or somefield:someterm$.

   This feature can also be used with an actual phrase search, but in this
   case, the distance applies to the whole phrase and anchor, so that, for
   example, bla bla my unexpected term at the beginning of the text would
   be a match for "^my term"o5.

   Anchored searches can be very useful for searches inside somewhat
   structured documents like scientific articles, in case explicit
   metadata has not been supplied, for example for looking for matches
   inside the abstract or the list of authors (which occur at the top of
   the document).

3.7. Using Synonyms

   Term synonyms and text search:  in general, there are two main ways to
   use term synonyms for searching text:
     * At index creation time, they can be used to alter the indexed
       terms, either increasing or decreasing their number, by expanding
       the original terms to all synonyms, or by reducing all synonym
       terms to a canonical one.
     * At query time, they can be used to match texts containing terms
       which are synonyms of the ones specified by the user, either by
       expanding the query for all synonyms, or by reducing the user entry
       to canonical terms (the latter only works if the corresponding
       processing has been performed while creating the index).

   With one exception, Recoll only uses synonyms at query time. A user
   query term which is part of a synonym group will be optionally expanded
   into an OR query for all terms in the group.

   The one exception is that if the [281]idxsynonyms parameter is set
   during indexing, and if the file contains multi-word synonyms, a
   multi-word single term will be emitted for every occurrence found in
   the text. If the same file is in use at query time, this will allow
   phrase and proximity searches to work for the multi-word synonyms.

   Synonym groups are defined inside ordinary text files. Each line in the
   file defines a group.

   Example:
hi hello "good morning"

# not sure about "au revoir" though. Is this english ?
bye goodbye "see you" \
"au revoir"

   As usual, lines beginning with a # are comments, empty lines are
   ignored, and lines can be continued by ending them with a backslash.

   Multi-word synonyms are supported, but be aware that these will
   generate phrase queries, which may degrade performance and will disable
   stemming expansion for the phrase terms.

   The contents of the synonyms file must be casefolded (not only
   lowercased), because this is what is expected at the point in the query
   processing where it is used. There are a few cases where this makes a
   difference: for example, German sharp s should be expressed as ss, and
   Greek final sigma as plain sigma. For reference, Python 3 has an easy
   way to casefold words (str.casefold()).
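
   For example, the following Python sketch would produce a casefolded
   copy of a hand-written synonyms file (the file names are only
   illustrations):
# Produce a casefolded copy of a synonyms file.
with open("synonyms.txt", encoding="utf-8") as fin, \
     open("synonyms-casefolded.txt", "w", encoding="utf-8") as fout:
    for line in fin:
        # str.casefold() lowercases and also performs the extra mappings
        # mentioned above (e.g. German sharp s becomes "ss").
        fout.write(line.casefold())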

   The synonyms file can be specified in the Search parameters tab of the
   GUI configuration Preferences menu entry, or as an option for
   command-line searches.

   Once the file is defined, the use of synonyms can be enabled or
   disabled directly from the Preferences menu.

   The synonyms are searched for matches with user terms after the latter
   are stem-expanded, but the contents of the synonyms file itself are not
   subjected to stem expansion. This means that a match will not be found
   if the form present in the synonyms file is not present anywhere in the
   document set (same with accents when using a raw index).

   The synonyms function is probably not going to help you find your
   letters to Mr. Smith. It is best used for domain-specific searches. For
   example, it was initially suggested by a user performing searches among
   historical documents: the synonyms file would contain nicknames and
   aliases for each of the persons of interest.

3.8. Path translations

   In some cases, the document paths stored inside the index do not match
   the actual ones, so that document previews and accesses will fail. This
   can occur in a number of circumstances:
     * When using multiple indexes it is a relatively common occurrence
       that some will actually reside on a remote volume, for example
       mounted via NFS. In this case, the paths used to access the
       documents on the local machine are not necessarily the same as
       the ones used while indexing on the remote machine. For example,
       /home/me may have been used as a topdirs elements while indexing,
       but the directory might be mounted as /net/server/home/me on the
       local machine.
     * The case may also occur with removable disks. It is perfectly
       possible to configure an index to live with the documents on the
       removable disk, but it may happen that the disk is not mounted at
       the same place so that the documents paths from the index are
       invalid. In some cases, the path adjustments [282]can be automated.
     * As a last example, one could imagine that a big directory has been
       moved, but that it is currently inconvenient to run the indexer.

   Recoll has a facility for rewriting access paths when extracting the
   data from the index. The translations can be defined for the main index
   and for any additional query index.

   In the above NFS example, Recoll could be instructed to rewrite any
   file:///home/me URL from the index to file:///net/server/home/me,
   allowing accesses from the client.

   The translations are defined in the [283]ptrans configuration file,
   which can be edited with a plain text editor or by using the GUI
   external indexes configuration dialog: Preferences → External index
   dialog, then click the Paths translations button on the right below the
   index list: translations will be set for the main index if no external
   index is currently selected in the list, or else for the currently
   selected index.

   Example entry from a ptrans file:
[/path/to/external/xapiandb]
/some/index/path = /some/local/path

   This would decide that, for the index stored in
   /path/to/external/xapiandb, any occurrence of /some/index/path should be
   replaced with /some/local/path when presenting a result.

Windows note

   At the moment, the path comparisons done for path translation under MS
   Windows are case sensitive (this will be fixed at some point). Use the
   natural character case as displayed in the file explorer. Example:
[Z:/some/mounted/xapiandb]
C: = Z:

3.9. Search case and diacritics sensitivity

   When working with a raw index (not the default), searches can be made
   sensitive to character case and diacritics. How this happens is
   controlled by configuration variables and what search data is entered.

   The general default is that searches entered without upper-case or
   accented characters are insensitive to case and diacritics. An entry of
   resume will match any of Resume, RESUME, résumé, Résumé etc.

   Two configuration variables can automate switching on sensitivity:

   autodiacsens
          If this is set, search sensitivity to diacritics will be turned
          on as soon as an accented character exists in a search term.
          When the variable is set to true, resume will start a
          diacritics-insensitive search, but résumé will be matched
          exactly. The default value is false.

   autocasesens
          If this is set, search sensitivity to character case will be
          turned on as soon as an upper-case character exists in a search
          term anywhere except as the first character. When the variable is
          set to true, us or Us will start a case-insensitive search, but
          US will be matched exactly. The default value is true (contrary
          to autodiacsens).

   As usual, capitalizing the first letter of a word will turn off its
   stem expansion and have no effect on case-sensitivity.

   You can also explicitly activate case and diacritics sensitivity by
   using modifiers with the query language. C will make the term
   case-sensitive, and D will make it diacritics-sensitive. Examples:

   "us"C will search for the term us exactly (Us will not be a match).

   "resume"D will search for the term resume exactly (résumé will not be a
   match).

   When either case or diacritics sensitivity is activated, stem expansion
   is turned off: having both sensitivity and stem expansion would not make
   much sense.

3.10. Desktop integration

   Being independent of the desktop type has its drawbacks: Recoll desktop
   integration is minimal. However there are a few tools available:
     * Users of recent Ubuntu-derived distributions, or any other Gnome
       desktop systems (e.g. Fedora) can install the [284]Recoll GSSP
       (Gnome Shell Search Provider).
     * For KDE users, there is a KIO worker module, which was described in
       a [285]previous section, and a Krunner plugin. Both are usually
       installed with the main Recoll package.
     * Hotkeying recoll: it is surprisingly convenient to be able to show
       or hide the Recoll GUI with a single keystroke. Recoll comes with a
       small Python script, based on the libwnck window manager interface
       library, which will allow you to do just this. The detailed
       instructions are on [286]this wiki page.

Chapter 4. Programming interface

   Recoll has an Application Programming Interface, usable both for
   indexing and searching, currently accessible from the Python language.

   Another less radical way to extend the application is to write input
   handlers for new types of documents.

   The processing of metadata attributes for documents (fields) is highly
   configurable.

4.1. Writing a document input handler

Terminology

   The small programs or pieces of code which handle the processing of the
   different document types for Recoll used to be called filters, which is
   still reflected in the name of the directory which holds them and many
   configuration variables. They were named this way because one of their
   primary functions is to filter out the formatting directives and keep
   the text content. However these modules may have other behaviours, and
   the term input handler is now progressively substituted in the
   documentation. filter is still used in many places though.

   Recoll input handlers cooperate to translate from the multitude of
   input document formats, simple ones such as OpenDocument or Acrobat, or
   compound ones such as Zip or Email, into the final Recoll indexing
   input format, which is plain text (in many cases the processing
   pipeline has an intermediary HTML step, which may be used for better
   previewing presentation). Most input handlers are executable programs
   or scripts. A few handlers are coded in C++ and live inside
   recollindex. This latter kind will not be described here.

   There are two kinds of external executable input handlers:
     * Simple exec handlers run once and exit. They can be bare programs
       like antiword, or scripts using other programs. They are very
       simple to write, because they just need to print the converted
       document to the standard output. Their output can be plain text or
       HTML. HTML is usually preferred because it can store metadata
       fields and it allows preserving some of the formatting for the GUI
       preview. However, these handlers have limitations:
          + They can only process one document per file.
          + The output MIME type must be known and fixed.
          + For handlers producing text/plain, the character encoding must
            be known and fixed (or possibly just depending on location).
     * Multiple execm handlers can process multiple files (sparing the
       process startup time which can be very significant), or multiple
       documents per file (e.g.: for archives or multi-chapter
       publications). They communicate with the indexer through a simple
       protocol, but are nevertheless a bit more complicated than the
       older kind. Most of the new handlers are written in Python
       (exception: rclimg which is written in Perl because exiftool has no
       real Python equivalent). The Python handlers use common modules to
       factor out the boilerplate, which can make them very simple in
       favorable cases. The subdocuments output by these handlers can be
       directly indexable (text or HTML), or they can be other simple or
       compound documents that will need to be processed by another
       handler.

   In both cases, handlers deal with regular file system files, and can
   process either a single document, or a linear list of documents in each
   file. Recoll is responsible for performing up-to-date checks, dealing
   with more complex embedding, temporary files, and other upper-level
   issues.

   A simple handler returning a document in text/plain format cannot
   transfer any metadata to the indexer. Generic metadata, like document
   size or modification date, will be gathered and stored by the indexer.

   Handlers that produce text/html format can return an arbitrary amount
   of metadata inside HTML meta tags. These will be processed according to
   the directives found in the [287]fields configuration file.

   The handlers that can handle multiple documents per file return a
   single piece of data to identify each document inside the file. This
   piece of data, called an ipath will be sent back by Recoll to extract
   the document at query time, for previewing, or for creating a temporary
   file to be opened by a viewer. These handlers can also return metadata
   either as HTML meta tags, or as named data through the communication
   protocol.

   The following section describes the simple handlers, and the next one
   gives a few explanations about the execm ones. You could conceivably
   write a simple handler with only the elements in the manual. This will
   not be the case for the other ones, for which you will have to look at
   the code.

4.1.1. Simple input handlers

   Recoll simple handlers are usually shell-scripts, but this is in no way
   necessary. Extracting the text from the native format is the difficult
   part. Outputting the format expected by Recoll is trivial. Happily
   enough, most document formats have translators or text extractors which
   can be called from the handler. In some cases the output of the
   translating program is completely appropriate, and no intermediate
   shell-script is needed.

   Input handlers are called with a single argument which is the source
   file name. They should output the result to stdout.

   When writing a handler, you should decide if it will output plain text
   or HTML. Plain text is simpler, but you will not be able to add
   metadata or vary the output character encoding (this will be defined in
   a configuration file). Additionally, some formatting may be easier to
   preserve when previewing HTML. Actually the deciding factor is
   metadata: Recoll has a way to [288]extract metadata from the HTML
   header and use it for field searches.

   The RECOLL_FILTER_FORPREVIEW environment variable (values yes, no)
   tells the handler if the operation is for indexing or previewing. Some
   handlers use this to output a slightly different format, for example
   stripping uninteresting repeated keywords (e.g.: Subject: for email)
   when indexing. This is not essential.

   You should look at one of the simple handlers, for example rclps for a
   starting point.
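
   As an illustration, here is a minimal, hypothetical simple handler
   written in Python (it is not one of the handlers shipped with Recoll).
   It reads the file given as its single argument and prints an HTML
   document to the standard output, escaping the special characters as
   described in the section about handler output further down:
#!/usr/bin/env python3
# Minimal "simple" input handler sketch: read the input file, escape the
# HTML special characters and print an HTML document to stdout.
import html
import sys

with open(sys.argv[1], "rb") as f:
    text = f.read().decode("utf-8", "replace")

print('<html><head>')
print('<meta http-equiv="Content-Type" content="text/html;charset=UTF-8"/>')
print('</head><body><pre>')
print(html.escape(text))
print('</pre></body></html>')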

   Don't forget to make your handler executable before testing!

4.1.2. "Multiple" handlers

   If you can program and want to write an execm handler, it should not be
   too difficult to make sense of one of the existing handlers.

   The best documentation of the communication "protocol" is found in the
   comments at the top of the internfile/mh_execm.h header file.

   The existing handlers differ in the amount of helper code which they
   are using:
     * rclimg is written in Perl and handles the execm protocol all by
       itself (showing how trivial it is).
     * All the Python handlers share at least the rclexecm.py module,
       which handles the communication. Have a look at, for example,
       rclzip.py for a handler which uses rclexecm.py directly.
     * Most Python handlers which process single-document files by
       executing another command are further abstracted by using the
       rclexec1.py module. See for example rclrtf.py for a simple one, or
       rcldoc.py for a slightly more complicated one (possibly executing
       several commands).
     * Handlers which extract text from an XML document by using an XSLT
       style sheet are now executed inside recollindex, with only the
       style sheet stored in the filters/ directory. These can use a
       single style sheet (e.g. abiword.xsl), or two sheets for the data
       and metadata (e.g. opendoc-body.xsl and opendoc-meta.xsl). The
       mimeconf configuration file defines how the sheets are used, have a
       look. Before this processing was moved into the C++ code, the
       XSLT-based handlers used a common module, rclgenxslt.py; it is
       still around but unused at the moment. The handler for OpenXML
       presentations is still the Python version because the format did
       not fit with what the C++ code does. It would be a good starting
       point for handling another similar case.

   There is a sample trivial handler based on rclexecm.py, with many
   comments, not actually used by Recoll. It would index a text file as
   one document per line. Look for rcltxtlines.py in the src/filters
   directory in the online Recoll [289]Git repository (the sample is not
   included in the distributed release at the moment).

   You can also have a look at the slightly more complex rclzip.py which
   uses Zip file paths as identifiers (ipath).

   execm handlers sometimes need to make a choice for the nature of the
   ipath elements that they use in communication with the indexer. Here
   are a few guidelines:
     * Use ASCII or UTF-8 (if the identifier is an integer, print it, for
       example, like printf %d would do).
     * If at all possible, the data should make some kind of sense when
       printed to a log file to help with debugging.
     * Recoll uses a colon (:) as a separator to store a complex path
       internally (for deeper embedding). Colons inside the ipath elements
       output by a handler will be escaped, but would be a bad choice as a
       handler-specific separator (mostly, again, for debugging issues).

   In any case, the main goal is that it should be easy for the handler to
   extract the target document, given the file name and the ipath element.

   execm handlers will also produce a document with a null ipath element.
   Depending on the type of document, this may have some associated data
   (e.g. the body of an email message), or none (typical for an archive
   file). If it is empty, this document will be useful anyway for some
   operations, as the parent of the actual data documents.

4.1.3. Telling Recoll about the handler

   There are two elements that link a file to the handler which should
   process it: the association of file to MIME type and the association of
   a MIME type with a handler.

   The association of files to MIME types is mostly based on name
   suffixes. The types are defined inside the [290]mimemap file. Example:
.doc = application/msword

   If no suffix association is found for the file name, recent Recoll will
   use libmagic. Older versions or specially built ones may try to execute
   a system command (typically file -i or xdg-mime).

   The second element is the association of MIME types to handlers in the
   [291]mimeconf file. A sample will probably be better than a long
   explanation:
[index]
application/msword = exec antiword -t -i 1 -m UTF-8;\
mimetype = text/plain ; charset=utf-8

application/ogg = exec rclogg

text/rtf = exec unrtf --nopict --html; charset=iso-8859-1; mimetype=text/html

application/x-chm = execm rclchm.py

   The fragment specifies that:
     * application/msword files are processed by executing the antiword
       program, which outputs text/plain encoded in utf-8.
     * application/ogg files are processed by the rclogg script, with
       default output type (text/html, with encoding specified in the
       header, or utf-8 by default).
     * text/rtf is processed by unrtf, which outputs text/html. The
       iso-8859-1 encoding is specified because it is not the utf-8
       default, and not output by unrtf in the HTML header section.
     * application/x-chm is processed by a persistent handler. This is
       determined by the execm keyword.

4.1.4. Input handler output

   Both the simple and persistent input handlers can return any MIME type
   to Recoll, which will further process the data according to the MIME
   configuration.

   Most input filters produce either text/plain or text/html data. There
   are exceptions: for example, filters which process archive files (zip,
   tar, etc.) will usually return the contained documents as they are
   found, without processing them further.

   There is nothing to say about text/plain output, except that its
   character encoding should be consistent with what is specified in the
   mimeconf file.

   For filters producing HTML, the output could be very minimal like the
   following example:
<html>
  <head>
    <meta http-equiv="Content-Type" content="text/html;charset=UTF-8"/>
  </head>
  <body>
    Some text content
  </body>
</html>

   You should take care to escape some characters inside the text by
   transforming them into appropriate entities. At the very minimum, "&"
   should be transformed into "&amp;", "<" should be transformed into
   "&lt;". This is not always properly done by external helper programs
   which output HTML, and of course never by those which output plain
   text.

   When encapsulating plain text in an HTML body, the display of a preview
   may be improved by enclosing the text inside <pre> tags.
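   For example, a Python handler might produce such minimal HTML from
   plain text content as follows (a sketch, not actual Recoll code):
import html

def plaintext_to_html(text, charset="UTF-8"):
    # Escape HTML special characters and wrap the text in <pre> tags
    header = ('<html><head><meta http-equiv="Content-Type" '
              'content="text/html;charset=%s"/></head><body><pre>' % charset)
    return header + html.escape(text) + "</pre></body></html>"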

   The character set needs to be specified in the header. It does not need
   to be UTF-8 (Recoll will take care of translating it), but it must be
   accurate for good results.

   Recoll will process meta tags inside the header as possible document
   field candidates. Document fields can be processed by the indexer in
   different ways, for searching or displaying inside query results. This
   is described in a [292]following section.

   By default, the indexer will process the standard header fields if they
   are present: title, meta/description, and meta/keywords are all indexed
   and stored for query-time display.

   A predefined non-standard meta tag will also be processed by Recoll
   without further configuration: if a date tag is present and has the
   right format, it will be used as the document date (for display and
   sorting), in preference to the file modification date. The date format
   should be as follows:
<meta name="date" content="YYYY-mm-dd HH:MM:SS">

   or
<meta name="date" content="YYYY-mm-ddTHH:MM:SS">

   Example:
<meta name="date" content="2013-02-24 17:50:00">

   Input handlers also have the possibility to "invent" field names. This
   should also be output as meta tags:
<meta name="somefield" content="Some textual data" />

   You can embed HTML markup inside the content of custom fields, for
   improving the display inside result lists. In this case, add a (wildly
   non-standard) markup attribute to tell Recoll that the value is HTML
   and should not be escaped for display.
<meta name="somefield" markup="html" content="Some <i>textual</i> data" />

   As written above, the processing of fields is described in a
   [293]further section.

   Persistent filters can use another, probably simpler, method to produce
   metadata, by calling the setfield() helper method. This avoids the
   necessity to produce HTML, and any issue with HTML quoting. See, for
   example, rclaudio.py in Recoll 1.23 and later for an example of handler
   which outputs text/plain and uses setfield() to produce metadata.
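   A minimal sketch of this approach, assuming the conventions used by
   rclaudio.py, where the handler object keeps a reference to its
   rclexecm.RclExecM helper as self.em (fragment only, the field names
   are just examples):
# Inside the handler method which processes the document:
self.em.setfield("author", "Some Author")
self.em.setfield("title", "Some Title")
# The document body is then returned as plain text, with no need for
# HTML meta tags or quoting.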

4.1.5. Page numbers

   The indexer will interpret ^L characters in the handler output as
   indicating page breaks, and will record them. At query time, this
   allows starting a viewer on the right page for a hit or a snippet.
   Currently, only the PDF, Postscript and DVI handlers generate page
   breaks.
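   For example, a handler producing text/plain output for a paginated
   format could separate the page texts with form feed characters
   (illustrative sketch only):
# "\f" is the ^L (form feed) character
output_text = "\f".join(page_texts)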

4.2. Field data processing

   Fields are named pieces of information in or about documents, like
   title, author, abstract.

   The field values for documents can appear in several ways during
   indexing: either output by input handlers as meta fields in the HTML
   header section, or extracted from file extended attributes, or added as
   attributes of the Doc object when using the API, or again synthesized
   internally by Recoll.

   The Recoll query language allows searching for text in a specific
   field.

   Recoll defines a number of default fields. Additional ones can be
   output by handlers, and described in the fields configuration file.

   Fields can be:
     * indexed, meaning that their terms are separately stored in inverted
       lists (with a specific prefix), and that a field-specific search is
       possible.
     * stored, meaning that their value is recorded in the index data
       record for the document, and can be returned and displayed with
       search results.

   A field can be either or both indexed and stored. This and other
   aspects of field handling are defined inside the fields configuration
   file.

   Some fields may also be designated as supporting range queries, meaning
   that the results may be selected for an interval of their values. See
   the [294]configuration section for more details.

   The sequence of events for field processing is as follows:
     * During indexing, recollindex scans all meta fields in HTML
       documents (most document types are transformed into HTML at some
       point). It compares the name for each element to the configuration
       defining what should be done with fields (the fields file).
     * If the name for the meta element matches one for a field that
       should be indexed, the contents are processed and the terms are
       entered into the index with the prefix defined in the fields file.
     * If the name for the meta element matches one for a field that
       should be stored, the content of the element is stored with the
       document data record, from which it can be extracted and displayed
       at query time.
     * At query time, if a field search is performed, the index prefix is
       computed and the match is only performed against appropriately
       prefixed terms in the index.
     * At query time, the field can be displayed inside the result list by
       using the appropriate directive in the definition of the
       [295]result list paragraph format. All fields are displayed on the
       fields screen of the preview window (which you can reach through
       the right-click menu). This is independent of the fact that the
       search which produced the results used the field or not.

   You can find more information in the [296]section about the fields
   file, or in comments inside the file.

   You can also have a look at the [297]example in the FAQs area,
   detailing how one could add a page count field to pdf documents for
   displaying inside result lists.
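   As an indicative sketch only (the authoritative syntax is described in
   the comments inside the fields file itself), declaring such a custom
   field (the name "pages" is just an example) as both indexed, with a
   custom term prefix, and stored might look as follows:
[prefixes]
pages = XPG

[stored]
pages =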

4.3. Python API

4.3.1. Introduction

   The Recoll Python programming interface can be used both for searching
   and for creating/updating an index with a program run by the Python3
   interpreter. It is available on all platforms (Unix-like systems, MS
   Windows, MacOS).

   The search interface is used in a number of active projects: the
   [298]Recoll Gnome Shell Search Provider, the [299]Recoll Web UI, and
   the [300]upmpdcli UPnP Media Server, in addition to many small scripts.

   The index updating part of the API can be used to create and update
   Recoll indexes. Up to Recoll 1.37 these needed to use separate
   configurations (but could be queried in conjunction with the regular
   index). As of Recoll 1.37, an external indexer based on the Python
   extension can update the main index. For example, the Recoll indexer
   for the Joplin notes application uses this method.

   The search API is modeled along the Python database API version 2.0
   specification (early versions used the version 1.0 spec).

   The recoll package contains two modules:
     * The recoll module contains functions and classes used to query or
       update the index.
     * The rclextract module contains functions and classes used at query
       time to access document data. This can be used, for example, for
       extracting embedded documents into standalone files.

   There is a good chance that your system repository has packages for the
   Recoll Python API, sometimes in a package separate from the main one
   (maybe named something like python3-recoll). Else refer to the
   [301]Building from source chapter.

   As an introduction sample, the following small program will run a query
   and list the title and url for each of the results. The python/samples
   source directory contains several examples of Python programming with
   Recoll, exercising the extension more completely, and especially its
   data extraction features.
#!/usr/bin/python3

from recoll import recoll

db = recoll.connect()
query = db.query()
nres = query.execute("some query")
results = query.fetchmany(20)
for doc in results:
    print("%s %s" % (doc.url, doc.title))

   You can also take a look at the source for (in order of complexity) the
   Recoll [302]Gnome Shell Search Provider or [303]WebUI, and the
   [304]upmpdcli local media server.

4.3.2. Interface elements

   A few elements in the interface are specific and need an explanation.

   ipath
          An ipath identifies an embedded document inside a standalone one
          (designated by an URL). The value, if needed, is stored along
          with the URL, but not indexed. It is accessible or set as a
          field in the Doc object.

          ipaths are opaque values for the lower index layers (Doc object
          producers or consumers), and their use is up to the specific
          indexer. For example, the Recoll file system indexer uses the
          ipath to store the part of the document access path internal to
          (possibly nested) container documents. ipath in this case is a
          vector of access elements (e.g., the first part could be a path
          inside a zip file to an archive member which happens to be an
          mbox file, the second element would be the message sequential
          number inside the mbox, etc.). The index itself has no knowledge
          of this hierarchical structure.

          At the moment, only the filesystem indexer uses hierarchical
          ipaths (neither the Web nor the Joplin one do), and there are
          some assumptions in the upper software layers about their
          structure. For example, the Recoll GUI knows about using an FS
          indexer ipath for such functions as opening the immediate parent
          of a given document.

          url and ipath are returned in every search result and define the
          access to the original document. ipath is empty for top-level
          document/files (e.g. a PDF document which is a filesystem file).

   udi
          A udi (unique document identifier) identifies a document.
          Because of limitations inside the index engine, it is restricted
          in length (to 200 bytes). The structure and contents of the udi
          are defined by the application and opaque to the index engine.
          For example, the internal file system indexer uses the complete
          document path (file path + internal path), truncated to a
          maximum length, the suppressed part being replaced by a hash
          value to retain practical uniqueness.

          To rephrase, and hopefully clarify: the filesystem indexer can't
          use the URL+ipath as a unique document-identifying term because
          this may be too big: it derives a shorter udi from URL+ipath.
          Another indexer could use a completely different method. For
          example, the Joplin indexer uses the note ID.

   parent_udi
          If this attribute is set on a document when entering it in the
          index, it designates its physical container document. In a
          multilevel hierarchy, this may not be the immediate parent. If
          the indexer uses the purge() method, then the use of parent_udi
          is mandatory for subdocuments. Else it is optional, but its use
          by an indexer may simplify index maintenance, as Recoll will
          automatically delete all children defined by parent_udi == udi
          when the document designated by udi is destroyed. e.g. if a Zip
          archive contains entries which are themselves containers, like
          mbox files, all the subdocuments inside the Zip file (mbox,
          messages, message attachments, etc.) would have the same
          parent_udi, matching the udi for the Zip file, and all would be
          destroyed when the Zip file (identified by its udi) is removed
          from the index.

   Stored and indexed fields
          The [305]fields file inside the Recoll configuration defines
          which document fields are either indexed (searchable), stored
          (retrievable with search results), or both. Apart from a few
          standard/internal fields, only the stored fields are retrievable
          through the Python search interface.

4.3.3. Log messages for Python scripts

   Two specific configuration variables: pyloglevel and pylogfilename
   allow overriding the generic values for Python programs. Set pyloglevel
   to 2 to suppress default startup messages (printed at level 3).

4.3.4. Python search interface

The recoll module

connect(confdir=None, extra_dbs=None, writable = False)

   The connect() function connects to one or several Recoll index(es) and
   returns a Db object.

   This call initializes the recoll module, and it should always be
   performed before any other call or object creation.
     * confdir designates the main index configuration directory. The
       usual system-dependent defaults apply if the value is empty.
     * extra_dbs is a list of additional external indexes (Xapian
       directories). These will be queried, but supply no configuration
       values.
     * writable decides if we can index new data through this connection.

   Example:
from recoll import recoll

# Opening the default db
db = recoll.connect()

# Opening the default db and a pair of additional indexes
db = recoll.connect(extra_dbs=["/home/me/.someconfdir/xapiandb",
                               "/data/otherconf/xapiandb"])

The Db class

   A Db object is created by a connect() call and holds a connection to a
   Recoll index.

   Db.query(), Db.cursor()
          These (synonym) methods return a blank Query object for this
          index.

   Db.getdoc(udi, idxidx=0)
          Retrieve a document given its unique document identifier, and
          its index if external indexes are in use. The main index is
          always index 0. The udi value could have been obtained from an
          earlier query as doc.rcludi, or would be known because the
          application is the indexer and generates the values.

   Db.termMatch(match_type, expr, field='', maxlen=-1, casesens=False,
          diacsens=False, lang='english')
          Expand an expression against the index term list. Performs the
          basic function from the GUI term explorer tool. match_type can
          be one of wildcard, regexp or stem. field, if set, restricts the
          matches to the contents of the specified metadata field. Returns
          a list of terms expanded from the input expression.
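          For example, to expand a wildcard expression against the index:

terms = db.termMatch("wildcard", "recol*")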

   Db.setAbstractParams(maxchars, contextwords)
          Set the parameters used to build snippets (sets of keywords in
          context text fragments). maxchars defines the maximum total size
          of the abstract. contextwords defines how many terms are shown
          around the keyword.

   Db.close()
          Closes the connection. You can't do anything with the Db object
          after this. If the index was opened as writable, this commits
          any pending change.

   Db.setSynonymsFile(path)
          Set the synonyms file used when querying.

The Query class

   A Query object (equivalent to a cursor in the Python DB API) is created
   by a Db.query() call. It is used to execute index searches.

   Query.sortby(fieldname, ascending=True)
          Set the sorting order for future searches to using fieldname, in
          ascending or descending order. Must be called before executing
          the search.

   Query.execute(query_string, stemming=1, stemlang="english",
          fetchtext=False, collapseduplicates=False)
          Start a search for query_string, a Recoll search language
          string. If the index stores the documents texts and fetchtext is
          True, the Doc objects in the query result will store the
          document extracted text in doc.text. Else, the doc.text fields
          will be empty. If collapseduplicates is true, only one of
          multiple identical documents (defined by having the same MD5
          hash) will appear in the result list.

   Query.executesd(SearchData, fetchtext=False, collapseduplicates=False)
          Starts a search for the query defined by the SearchData object.
          See above for a description of the other parameters.

   Query.fetchmany(size=query.arraysize)
          Fetch the next Doc objects from the current search result list,
          and return them as an array of the required size, which is by
          default the value of the arraysize data member.

   Query.fetchone()
          Fetch the next Doc object from the current search result list.
          Generates a StopIteration exception if there are no results
          left.

   Query.__iter__() and Query.next()
          So that things like for doc in query: will work. Example:

from recoll import recoll

db = recoll.connect()
q = db.query()
nres = q.execute("some query")
for doc in q:
    print("%s" % doc.title)

   Query.close()
          Close the query. The object is unusable after the call.

   Query.scroll(value, mode='relative')
          Adjust the position in the current result set. mode can be
          relative or absolute.

   Query.getgroups()
          Retrieve the expanded query terms as a list of pairs. Meaningful
          only after executexx. In each pair, the first entry is a list of
          user terms (of size one for simple terms, or more for group and
          phrase clauses), the second a list of query terms derived from
          the user terms and used in the Xapian Query.

   Query.getxquery()
          Return the Xapian query description as a Unicode string.
          Meaningful only after executexx.

   Query.highlight(text, ishtml = 0, methods = object)
          Will insert <span class="rclmatch"> and </span> tags around the
          match areas in the input text and return the modified text.
          ishtml can be set to indicate that the input text is HTML and
          that HTML special characters should not be escaped. methods, if
          set, should be an object having methods startMatch(i) and
          endMatch() which will be called for each match and should return
          a begin and end tag. Example:

class MyHighlighter:
    def startMatch(self, idx):
        return "<span style='color:red;background:yellow;'>"
    def endMatch(self):
        return "</span>"
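# The highlighter object would then be passed to the method, for example
# (assuming an executed query with fetchtext=True and a fetched doc):
html_fragment = query.highlight(doc.text, methods=MyHighlighter())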

   Query.makedocabstract(doc, methods = object)
          Create a snippets abstract for doc (a Doc object) by selecting
          text around the match terms. If methods is set, will also
          perform highlighting. See the highlight() method.

   Query.getsnippets(doc, maxoccs = -1, ctxwords = -1, sortbypage=False,
          methods=object)
          Return a list of extracts from the result document by selecting
          text around the match terms. Each entry in the result list is a
          triple: page number, term, text. By default, the most relevant
          snippets appear first in the list. Set sortbypage to sort by
          page number instead. If methods is set, the fragments will be
          highlighted (see the highlight() method). If maxoccs is set, it
          defines the maximum result list length. ctxwords allows
          adjusting the individual snippet context size.
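          For example (illustrative):

for page, term, snippet in query.getsnippets(doc, maxoccs=5):
    print("page %s, term %s: %s" % (page, term, snippet))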

   Query.arraysize
          (r/w). Default number of records processed by fetchmany().

   Query.rowcount
          Number of records returned by the last execute.

   Query.rownumber
          Next index to be fetched from results. Normally increments after
          each fetchone() call, but can be set/reset before the call to
          effect seeking (equivalent to using scroll()). Starts at 0.

The Doc class

   A Doc object contains index data for a given document. The data is
   extracted from the index when searching, or set by the indexer program
   when updating.

   Please note that a Doc should never be instantiated by its constructor
   but instead by calling db.doc() or some other API method returning a
   doc object. Otherwise, the object will lack some necessary references.

   The Doc object has many attributes to be read or set by its user. It
   mostly matches the Rcl::Doc C++ object. Some of the attributes are
   predefined, but, especially when indexing, others can be set, the names
   of which will be processed as field names by the indexing
   configuration. Inputs can be specified as Unicode or strings. Outputs
   are Unicode objects. All dates are specified as Unix timestamps,
   printed as strings. Please refer to the rcldb/rcldoc.cpp C++ file for a
   full description of the predefined attributes. Here follows a short
   list.
     * url the document URL but see also getbinurl()
     * ipath the document ipath for embedded documents.
     * fbytes, dbytes the document file and text sizes.
     * fmtime, dmtime the document file and document times.
     * xdocid the document Xapian document ID. This is useful if you want
       to access the document through a direct Xapian operation.
     * mtype the document MIME type.
     * text holds the document processed text, if the index itself is
       configured to store it (true by default) and if the fetchtext query
       execute() option was true. See also the rclextract module for
       accessing document contents.
     * Other fields stored by default: author, filename, keywords,
       recipient

   At query time, only the fields that are defined as stored either by
   default or in the fields configuration file will be meaningful in the
   Doc object.

   get(key), [] operator
          Retrieve the named document attribute. You can also use
          getattr(doc, key) or doc.key.

   doc.key = value
          Set the named document attribute. You can also use
          setattr(doc, key, value).

   getbinurl()
          Retrieve the URL in byte array format (no transcoding), for use
          as parameter to a system call. This is useful for the filesystem
          indexer file:// URLs which are stored unencoded, as binary data.

   setbinurl(url)
          Set the URL in byte array format (no transcoding).

   items()
          Return a dictionary of doc object keys/values.

   keys()
          Return a list of doc object keys (attribute names).

The SearchData class

   A SearchData object allows building a query by combining clauses, for
   execution by Query.executesd(). It can be used in replacement of the
   query language approach. The interface is going to change a little, so
   no detailed doc for now...

   addclause(type='and'|'or'|'excl'|'phrase'|'near'|'sub', qstring=string,
          slack=0, field='', stemming=1, subSearch=SearchData)

The rclextract module

   Prior to Recoll 1.25, index queries could not provide document content
   because it was never stored. Recoll 1.25 and later usually store the
   document text, which can be optionally retrieved when running a query
   (see query.execute() above - the result is always plain text).

   Independently, the rclextract module can give access to the original
   document and to the document text content, possibly as an HTML version.
   Accessing the original document is particularly useful if it is
   embedded (e.g. an email attachment).

   You need to import the recoll module before the rclextract module.

The Extractor class

   Extractor(doc)
          An Extractor object is built from a Doc object, output from a
          query.

   Extractor.textextract(ipath)
          Extract document defined by ipath and return a Doc object. The
          doc.text field has the document text converted to either
          text/plain or text/html according to doc.mimetype. The typical
          use would be as follows:

from recoll import recoll, rclextract

qdoc = query.fetchone()
extractor = rclextract.Extractor(qdoc)
doc = extractor.textextract(qdoc.ipath)
# use doc.text, e.g. for previewing

          Passing qdoc.ipath to textextract() is redundant, but reflects
          the fact that the Extractor object actually has the capability
          to access the other entries in a compound document.

   Extractor.idoctofile(ipath, targetmtype, outfile='')
          Extracts document into an output file, which can be given
          explicitly or will be created as a temporary file to be deleted
          by the caller. Typical use:

from recoll import recoll, rclextract

qdoc = query.fetchone()
extractor = rclextract.Extractor(qdoc)
filename = extractor.idoctofile(qdoc.ipath, qdoc.mimetype)

          In all cases the output is a copy, even if the requested
          document is a regular system file, which may be wasteful in some
          cases. If you want to avoid this, you can test for a simple file
          document as follows:

not doc.ipath and ("rclbes" not in doc.keys() or doc["rclbes"] == "FS")

Search API usage example

   The following sample would query the index with a user language string.
   See the python/samples directory inside the Recoll source for other
   examples. The recollgui subdirectory has a very embryonic GUI which
   demonstrates the highlighting and data extraction functions.
#!/usr/bin/python3

from recoll import recoll

db = recoll.connect()
db.setAbstractParams(maxchars=80, contextwords=4)

query = db.query()
nres = query.execute("some user question")
print("Result count: %d" % nres)
if nres > 5:
    nres = 5
for i in range(nres):
    doc = query.fetchone()
    print("Result #%d" % (query.rownumber))
    for k in ("title", "size"):
        print("%s : %s" % (k, getattr(doc, k)))
    print("%s\n" % db.makeDocAbstract(doc, query))

The fsudi module

   The fsudi module contains a single method, which duplicates the code
   used by the main filesystem indexer to derive a UDI from a filesystem
   path. In turn, this allows external code to call the Db.getdoc() method
   to retrieve the Doc object. This can be useful, for example, for
   updating the metadata without fully reindexing the document.

   fsudi.fs_udi(path, ipath='')
          Obtain the UDI value for the given path and ipath. The returned
          value can be used with the Db.getdoc() method.
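   For example (a sketch; the file path is just an illustration, and the
   fsudi module is assumed to be importable from the recoll package like
   the others):
from recoll import recoll, fsudi

db = recoll.connect()
udi = fsudi.fs_udi("/home/me/docs/report.pdf")
doc = db.getdoc(udi)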

4.3.5. Python indexing interface

Recoll external indexers

   The Recoll indexer is capable of processing many different document
   formats. However, some forms of data storage do not lend themselves
   easily to standard processing because of their great variability. A
   canonical example would be data in an SQL database. While it might be
   possible to create a configurable handler to process data from a
   database, the many variations in storage organisation and SQL dialects
   make this difficult.

   Recoll can instead support external indexers where all the
   responsibility to handle the data format is delegated to an external
   script. The script language has to be Python 3 at the moment, because
   this is the only language for which an API binding exists.

   Up to Recoll 1.35, such an indexer had to work on a separate Recoll
   index, which would be added as an external index for querying from the
   main one, and for which a separate indexing schedule had to be managed.
   The reason was that the main document indexer purge pass (removal of
   deleted documents) would also remove all the documents belonging to the
   external indexer, as they were not seen during the filesystem walk (and
   conversely, the external indexer purge pass would delete all the
   regular document entries).

   As of Recoll 1.36, an improvement and new API call allows external
   indexers to be fully integrated, and work on the main index, with
   updates triggered from the normal recollindex program.

   An external indexer has to do the same work as the Recoll file system
   indexer: look for modified documents, extract their text, call the API
   for indexing them, and the one for purging the data for deleted
   documents.

   A description of the API method follows, but you can also [306]jump
   ahead for a look at some sample pseudo-code and a pair of actual
   implementations, one of which does something useful.

The Python indexing API

   There are two parts in the indexing interface:
     * Methods inside the recoll module allow the foreign indexer to
       update the index.
     * An interface based on scripts execution is defined for executing
       the indexer (from recollindex) and to allow either the GUI or the
       rclextract module to access original document data for previewing
       or editing.

   Two sample scripts are included with the Recoll source and described in
   more detail a [307]bit further on.

Python indexing interface methods

   The update methods are part of the recoll module. The connect() method
   is used with a writable=True parameter to obtain a writable Db object.
   The following Db object methods are then available.

   Note that the changes are only guaranteed to be flushed to the index
   when db.close() is called. This normally occurs when the program exits,
   but it is much safer to use an explicit call after making the changes.

   addOrUpdate(udi, doc, parent_udi=None, metaonly=False)
          Add or update index data for a given document.

          The [308]udi string must define a unique id for the document. It
          is an opaque interface element and not interpreted inside the
          lower level Recoll code.

          doc is a [309]Doc object, containing the data to be indexed.

          If [310]parent_udi is set, this is a unique identifier for the
          top-level container, the document for which needUpdate() would
          be called (e.g. for the filesystem indexer, this would be the
          one which is an actual file).

          If metaonly is set, the main document text (in the doc.text
          attribute) will be ignored and only the other metadata fields
          present in the doc object will be processed. This can be useful
          for updating the metadata apart from the main text.

          Document attributes: doc.text should have the main text. It is
          ignored if metaonly is set. For actual indexing (metaonly not
          set), the url and possibly ipath fields should also be set to
          allow access to the actual document after a query. Other fields
          may also need to be set by an external indexer (see the
          description further down): rclbes, sig, mimetype. Of course, any
          standard or custom Recoll field can also be added.

   delete(udi)
          Remove from the index all data for udi, and for all documents
          (if any) which have udi as parent_udi.

   needUpdate(udi, sig)
          Test if the index needs to be updated for the document
          identified by udi. If this call is to be used, the doc.sig field
          should contain a signature value when calling addOrUpdate(). The
          needUpdate() call then compares its parameter value with the
          stored sig for udi. sig is an opaque value, compared as a
          string.

          The filesystem indexer uses a concatenation of the decimal
          string values for file size and update time, but a hash of the
          contents could also be used.

          As a side effect, if the return value is false (the index is up
          to date), the call will set the existence flag for the document
          (and any subdocument defined by its parent_udi), so that a later
          purge() call will preserve them.

          The use of needUpdate() and purge() is optional, and the indexer
          may use another method for checking the need to reindex or to
          delete stale entries.

   preparePurge(backend_name)
          Mark all documents which do *not* belong to backend_name as
          existing. backend_name is the value chosen for the rclbes field
          for the indexer documents (e.g. "MBOX", "JOPLIN"... for the
          samples). This is a mandatory call before starting an update if
          the index is shared with other backends and you are going to
          call purge() after the update, else all documents for other
          backends will be deleted from the index by the purge.

   purge()
          Delete all documents that were not touched during the just
          finished indexing pass (since preparePurge()). These are the
          documents for which the needUpdate() call was not performed,
          indicating that they no longer exist in the storage system.

   createStemDbs(lang|sequence of langs)
          Create stemming dictionaries for query stemming expansion. Note
          that this is not needed at all if the indexing is done from the
          recollindex program, as it will perform this action after
          calling all the external indexers. Should be called when done
          updating the index. Available only after Recoll 1.34.3. As an
          alternative, you can close the index and execute:

recollindex -c <confdir> -s <lang(s)>

          The Python module currently has no interface to the Aspell
          speller functions, so the same approach can be used for creating
          the spelling dictionary (with option -S) (again, not needed if
          recollindex is driving the indexing).

Query data access for external indexers

   Recoll has internal methods to access document data for its internal
   (filesystem) indexer. An external indexer needs to provide data access
   methods if it needs integration with the GUI (e.g. preview function),
   or support for the rclextract module.

   An external indexer needs to provide two commands, for fetching data
   (typically for previewing) and for computing the document signature
   (for up-to-date checks when opening or previewing). The sample MBOX and
   JOPLIN implementations use the same script with different parameters to
   perform both operations, but this is just a choice. A third command
   must be provided for performing the indexing proper.

   The "fetch" and "makesig" scripts are called with three additional
   arguments: udi, url, ipath. These were set by the indexer and stored
   with the document by the addOrUpdate() call described above. Not all
   arguments are needed in all cases, the script will use what it needs to
   perform the requested operation. The caller expects the result data on
   stdout.

   recollindex will set the RECOLL_CONFDIR environment variable when
   executing the scripts, so that the configuration can be created as
rclconf = rclconfig.RclConfig()

   if needed, and the configuration directory obtained as
confdir = rclconf.getConfDir()

External indexers configuration

   The index data and the access method are linked by the rclbes (recoll
   backend storage) Doc field. You should set this to a short string value
   identifying your indexer (e.g. the filesystem indexer uses either FS or
   an empty value, the Web history indexer uses BGL, the Joplin notes
   indexer uses JOPLIN).

   The link is actually performed inside a backends configuration file
   (stored in the configuration directory). This defines commands to
   execute to access data from the specified indexer. Example, for the
   mbox indexing sample found in the Recoll source (which sets
   rclbes="MBOX"):
[MBOX]
fetch = /path/to/recoll/src/python/samples/rclmbox.py fetch
makesig = /path/to/recoll/src/python/samples/rclmbox.py makesig
index = /path/to/recoll/src/python/samples/rclmbox.py index

   When updating the index, recollindex will execute the value of the
   index parameter, if present (it may not be present if this concerns an
   external index).

   If an external indexer needs to store additional configuration
   parameters, e.g. path to a specific instance of the indexed
   application, etc., I suggest storing them inside recoll.conf, with a
   backend-specific prefix (e.g. joplin_db, mbox_directory) and using
   methods from the rclconfig module to access them.
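   For example (sketch only, the parameter name is just an illustration),
   after obtaining the configuration object as shown above:
dbpath = rclconf.getConfParam("joplin_db")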

External indexer samples

   First a quick look at an indexer main part, using pseudo-Python3 code:
# Connect to the recoll index. This will use the RECOLL_CONFDIR variable, set
# by the parent recollindex process, to use the right index.
rcldb = recoll.connect(writable=1)

# Important: tell the Recoll db that we are going to update documents for the
# MYBACK backend. All other documents will be marked as present so as
# not to be affected by the subsequent purge.
rcldb.preparePurge("MYBACK")

# Walk your dataset (of course your code will not look like this)
for mydoc in mydoclist:
    # Compute the doc unique identifier and the signature corresponding
    # to its update state (e.g. mtime and size for a file).
    udi = mydoc.udi()
    sig = mydoc.sig()
    # Check with recoll if the document needs updating. This has the
    # side effect of marking it present.
    if not rcldb.needUpdate(udi, sig):
        continue
    # The document data does not exist in the index or needs updating.
    # Create and fill a Recoll Doc object.
    doc = rcldb.doc()
    doc.mimetype = "some/type"
    # Say that the document belongs to this indexer
    doc.rclbes = "MYBACK"
    # The url will be passed back to you along with the udi if the fetch
    # method is called later (for previewing), or may be used for opening
    # the document with its native app from Recoll. The udi has a maximum
    # size because it is used as a Xapian term. The url has no such
    # limitation.
    doc.url = "someurl"
    doc.sig = sig
    # Of course add other fields like "text" (duh), "author" etc.
    # See the samples.
    doc.text = mydoc.text()
    # [...]
    # Then add or update the data in the index.
    rcldb.addOrUpdate(udi, doc)

# Finally call purge to delete the data for documents which were not seen
# at all.
rcldb.purge()

   The Recoll source tree has two samples of external indexers.
     * [311]rclmbox.py indexes a directory containing mbox folder files.
       Of course it is not really useful because Recoll can do this by
       itself, but it exercises most features in the update interface, and
       it has both top-level and embedded documents so it demonstrates the
       uses of the ipath values.
     * [312]rcljoplin.py indexes a Joplin application main notes SQL
       table. Joplin sets an update date attribute for each record in
       the table, so each note record can be processed as a standalone
       document (no ipath necessary). The sample has full preview and open
       support (the latter using a Joplin callback URL which allows
       displaying the result note inside the native app), so it could
       actually be useful to perform a unified search of the Joplin data
       and the regular Recoll data. As of Recoll 1.37.0, the Joplin
       indexer is part of the default installation (see the features
       section of the Web site for more information).

   See the comments inside the scripts for more information.

Using an external indexer index in conjunction with a regular one

   When adding an external indexer to a regular one for unified querying,
   some elements of the foreign index configuration should be copied or
   merged into the main index configuration. At the very least, the
   backends file needs to be copied or merged, and also possibly data from
   the mimeconf and mimeview files. See the rcljoplin.py sample for an
   example.

Chapter 5. Configuration

5.1. Settings, configuration overview

   Recoll has two kinds of configuration parameters:
     * GUI settings are global and always set through the GUI
       configuration Preferences menu entry.
     * Index configuration parameters are set per index, and stored in
       each index configuration directory. Many can be set either through
       the GUI or by editing a text configuration file, but some less
       common values can only be set by editing.

GUI settings

   For reference, the GUI settings are stored in
   $HOME/.config/Recoll.org/recoll.conf on Unix-like systems and
   C:/Users/[you]/AppData/Roaming/Recoll/recoll.ini on Windows.

Index configuration parameters

   The parameters for each Recoll index are set inside text configuration
   files located in a configuration directory. There can be several such
   directories, each of which defines the parameters for one index.

   There is a default index configuration directory, used when not
   specified otherwise. On Unix-like systems it is located in
   $HOME/.recoll. Under Windows, it is located in
   C:\Users\[you]\AppData\Local\Recoll.

   The configuration files can be edited with a plain text editor or
   through the Index configuration dialog (Preferences menu). The GUI tool
   will try to preserve your formatting and comments as much as possible,
   so it is quite possible to use both approaches on the same
   configuration.

   For each index, there are actually at least two sets of configuration
   files. The parameters set in the locations listed above override or
   complement system-wide configuration files which come with the
   installation and are kept in a directory named like
   /usr/share/recoll/examples on Unix-like systems (or an equivalent on
   other systems), and define default values, shared by all indexes (the
   values in these files are often commented out, and just present to
   indicate the default coded in the program).

   The local configuration directory only stores additional or overriding
   parameters. The defaults are stored in the central location. You should
   never edit the central files as they will be overwritten by a software
   update.

   The location for the index configuration directory can be changed, or
   others can be added for separate indexes with the RECOLL_CONFDIR
   environment variable or the -c option parameter to recoll and
   recollindex.

   Special use: in addition, for each index, it is possible to specify two
   additional configuration directories which will be stacked before and
   after the user configuration directory. These are defined by the
   RECOLL_CONFTOP and RECOLL_CONFMID environment variables. Values from
   configuration files inside the top directory will override user ones,
   values from configuration files inside the middle directory will
   override system ones and be overridden by user ones. These two
   variables may be of use to applications which augment Recoll
   functionality, and need to add configuration data without disturbing
   the user's files. Please note that these variables, which currently
   each hold a single value, will probably be interpreted as
   colon-separated lists in the future: do not use colon characters inside
   the directory paths.

   If the default configuration directory does not exist when either
   recoll or recollindex is started, it will be created with a set of
   empty configuration files. recoll will give you a chance to edit the
   configuration file before starting indexing. recollindex will proceed
   immediately. To avoid mistakes, the automatic directory creation will
   only occur for the default location, not if -c or RECOLL_CONFDIR were
   used, in which case, you will have to create the directory.

   All configuration files share the same format. For example, a short
   extract of the main configuration file might look as follows:
# Space-separated list of files and directories to index.
topdirs =  ~/docs /usr/share/doc

[~/somedirectory-with-utf8-txt-files]
defaultcharset = utf-8

   There are three kinds of lines:
     * Comments: start with a hash mark #.
     * Parameter assignments: name = value.
     * Section definitions: [somedirname].

   Lines which are empty or only containing white space are ignored.

   Long lines can be broken by ending each incomplete part with a
   backslash (\).

   Depending on the type of configuration file, section definitions either
   separate groups of parameters or allow redefining some parameters for a
   directory sub-tree. They stay in effect until another section
   definition, or the end of file, is encountered. Some of the parameters
   used for indexing are looked up hierarchically from the current
   directory location upwards. Not all parameters can be meaningfully
   redefined, this is specified for each in the next section.

Important

   Global parameters must not be defined in a directory subsection, else
   they will not be found at all by the Recoll code, which looks for them
   at the top level (e.g. skippedPaths).

   When found at the beginning of a file path, the tilde character (~) is
   expanded to the name of the user's home directory, as a shell would do.
   The same convention is used on Windows.

   Some parameters are lists of strings. White space is used for
   separation. List elements with embedded spaces can be quoted using
   double-quotes. Double quotes inside these elements can be escaped with
   a backslash.

   No value inside a configuration file can contain a newline character.
   Long lines can be continued by escaping the physical newline with
   backslash, even inside quoted strings.
          astringlist =  "some string \
          with spaces"
          thesame = "some string with spaces"

   Parameters which are not part of string lists can't be quoted, and
   leading and trailing space characters are stripped before the value is
   used.

Important

   Quotes processing is ONLY applied to parameter values which are lists.
   Double quoting a single value like, e.g. dbdir will result in an
   incorrect value, with quotes included. This is quite confusing, and was
   a design mistake but it is much too late to fix.

   Encoding issues.  Most of the configuration parameters are plain ASCII.
   Two particular sets of values may cause encoding issues:
     * File path parameters may contain non-ASCII characters and should
       use the exact same byte values as found in the file system
       directory. Usually, this means that the configuration file should
       use the system default locale encoding.
     * The unac_except_trans parameter (meaning unaccenting exception
       translations) should be encoded in UTF-8. If your system locale is
       not UTF-8 (which is now very rare), and you need to also specify
       non-ASCII file paths, this poses a difficulty because common text
       editors cannot handle multiple encodings in a single file. In this
       relatively unlikely case, you can edit the configuration file as
       two separate text files with appropriate encodings, and concatenate
       them to create the complete configuration.

5.2. Environment variables

   RECOLL_CONFDIR
          Defines the main configuration directory.

   RECOLL_TMPDIR, TMPDIR
          Locations for temporary files, in this order of priority. The
          default if none of these is set is to use /tmp. Big temporary
          files may be created during indexing, mostly for decompressing,
          and also for processing, e.g. email attachments.

   RECOLL_CONFTOP, RECOLL_CONFMID
          Allow adding configuration directories with priorities below and
          above the user directory (see above the Configuration overview
          section for details).

   RECOLL_EXTRA_DBS, RECOLL_ACTIVE_EXTRA_DBS
          Help for setting up external indexes. See [313]this paragraph
          for explanations.

   RECOLL_DATADIR
          Defines a replacement for the default location of Recoll data
          files, normally found in, e.g., /usr/share/recoll.

   RECOLL_FILTERSDIR
          Defines a replacement for the default location of Recoll
          filters, normally found in, e.g., /usr/share/recoll/filters.

   ASPELL_PROG
          aspell program to use for creating the spelling dictionary. The
          result has to be compatible with the libaspell which Recoll is
          using.

5.3. Recoll main configuration file, recoll.conf

5.3.1. Parameters affecting what documents we index

   topdirs
          Space-separated list of files or directories to recursively
          index. You can use symbolic links in the list, they will be
          followed, independently of the value of the followLinks
          variable. The default value is ~ : recursively index $HOME.

   monitordirs
          Space-separated list of files or directories to monitor for
          updates. When running the real-time indexer, this allows
          monitoring only a subset of the whole indexed area. The elements
          must be included in the tree defined by the 'topdirs' members.

   skippedNames
          File and directory names which should be ignored. White space
          separated list of wildcard patterns (simple ones, not paths,
          must contain no '/' characters), which will be tested against
          file and directory names.

          Have a look at the default configuration for the initial value,
          some entries may not suit your situation. The easiest way to see
          it is through the GUI Index configuration "local parameters"
          panel.

          The list in the default configuration does not exclude hidden
          directories (names beginning with a dot), which means that it
          may index quite a few things that you do not want. On the other
          hand, email user agents like Thunderbird usually store messages
          in hidden directories, and you probably want this indexed. One
          possible solution is to have ".*" in "skippedNames", and add
          things like "~/.thunderbird" "~/.evolution" to "topdirs".

          Not even the file names are indexed for patterns in this list,
          see the "noContentSuffixes" variable for an alternative approach
          which indexes the file names. Can be redefined for any subtree.
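          For example, in recoll.conf (as described above: hidden entries
          are skipped globally, but the Thunderbird mail store is still
          indexed because it is listed in topdirs):

topdirs = ~ ~/.thunderbird
skippedNames+ = .*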

   skippedNames-
          List of name patterns to remove from the default skippedNames
          list. Allows modifying the list in the local configuration
          without copying it.

   skippedNames+
          List of name patterns to add to the default skippedNames list.
          Allows modifying the list in the local configuration without
          copying it.

   onlyNames
          Regular file name filter patterns. This is normally empty. If
          set, only the file names not in skippedNames and matching one of
          the patterns will be considered for indexing. Can be redefined
          per subtree. Does not apply to directories.

   noContentSuffixes
          List of name endings (not necessarily dot-separated suffixes)
          for which we don't try MIME type identification, and don't
          uncompress or index content. Only the names will be indexed.
          This complements the now obsoleted recoll_noindex list from the
          mimemap file, which will go away in a future release (the move
          from mimemap to recoll.conf allows editing the list through the
          GUI). This is different from skippedNames because these are name
          ending matches only (not wildcard patterns), and the file name
          itself gets indexed normally. This can be redefined for
          subdirectories.

   noContentSuffixes-
          List of name endings to remove from the default
          noContentSuffixes list.

   noContentSuffixes+
          List of name endings to add to the default noContentSuffixes
          list.

   skippedPaths
          Absolute paths we should not go into. Space-separated list of
          wildcard expressions for absolute filesystem paths (for files or
          directories). The variable must be defined at the top level of
          the configuration file, not in a subsection.

          Any value in the list must be textually consistent with the
          values in topdirs, no attempts are made to resolve symbolic
          links. In practice, if, as is frequently the case, /home is a
          link to /usr/home, your default topdirs will have a single entry
          "~" which will be translated to "/home/yourlogin". In this case,
          any skippedPaths entry should start with "/home/yourlogin" *not*
          with "/usr/home/yourlogin".

          The index and configuration directories will automatically be
          added to the list.

          The expressions are matched using "fnmatch(3)" with the
          FNM_PATHNAME flag set by default. This means that "/" characters
          must be matched explicitly. You can set
          "skippedPathsFnmPathname" to 0 to disable the use of
          FNM_PATHNAME (meaning that "/*/dir3" will match
          "/dir1/dir2/dir3").

          The default value contains the usual mount point for removable
          media to remind you that it is in most cases a bad idea to have
          Recoll work on these. Explicitly adding "/media/xxx" to the
          "topdirs" variable will override this.

   skippedPathsFnmPathname
          Set to 0 to override use of FNM_PATHNAME for matching skipped
          paths.

   nowalkfn
          File name which will cause its parent directory to be skipped.
          Any directory containing a file with this name will be skipped
          as if it was part of the skippedPaths list. Ex: .recoll-noindex

   daemSkippedPaths
          skippedPaths equivalent specific to real time indexing. This
          enables having parts of the tree which are initially indexed but
          not monitored. If daemSkippedPaths is not set, the daemon uses
          skippedPaths.

   followLinks
          Follow symbolic links during indexing. The default is to ignore
          symbolic links to avoid multiple indexing of linked files. No
          effort is made to avoid duplication when this option is set to
          true. This option can be set individually for each of the
          "topdirs" members by using sections. It can not be changed below
          the "topdirs" level. Links in the "topdirs" list itself are
          always followed.

   indexedmimetypes
          Restrictive list of indexed MIME types. Normally not set (in
          which case all supported types are indexed). If it is set, only
          the types from the list will have their contents indexed. The
          names will be indexed anyway if indexallfilenames is set
          (default). MIME type names should be taken from the mimemap file
          (the values may be different from xdg-mime or file -i output in
          some cases). Can be redefined for subtrees.

   excludedmimetypes
          List of excluded MIME types. Lets you exclude some types from
          indexing. MIME type names should be taken from the mimemap file
          (the values may be different from xdg-mime or file -i output in
          some cases). Can be redefined for subtrees.

   nomd5types
          MIME types for which we don't compute a md5 hash. md5 checksums
          are used only for deduplicating results, and can be very
          expensive to compute on multimedia or other big files. This list
          lets you turn off md5 computation for selected types. It is
          global (no redefinition for subtrees). At the moment, it only
          has an effect for external handlers (exec and execm). The file
          types can be specified by listing either MIME types (e.g.
          audio/mpeg) or handler names (e.g. rclaudio.py).

   compressedfilemaxkbs
          Size limit for compressed files. We need to decompress these in
          a temporary directory for identification, which can be wasteful
          in some cases. Limit the waste. Negative means no limit. 0
          results in no processing of any compressed file. Default 100 MB.

   textfilemaxmbs
          Size limit for text files. Mostly for skipping monster logs.
          Also used for max mail msg body size. Default 20 MB. Use a value
          of -1 to disable.

   textfilepagekbs
          Page size for text files. If this is set, text/plain files will
          be divided into documents of approximately this size. This will
          reduce memory usage at index time and help with loading data in
          the preview window at query time. Particularly useful with very
          big files, such as application or system logs. Also see
          textfilemaxmbs and compressedfilemaxkbs.

   textunknownasplain
          Process unknown text/xxx files as text/plain. This allows
          indexing miscellaneous text files identified as text/whatever
          by "file" or "xdg-mime" without having to explicitly set
          config entries for them. This works fine for indexing (though
          it will also cause the processing of a lot of useless files),
          but the documents indexed this way will be opened by the
          desktop viewer, even if text/plain has a specific editor.

   indexallfilenames
          Index the file names of unprocessed files. Index the names of
          files whose contents we do not index because of an excluded or
          unsupported MIME type.

   usesystemfilecommand
          Use a system mechanism as last resort to guess a MIME type.
          Depending on platform and version, a compile-time configuration
          will decide if this actually executes a command or uses
          libmagic. This last-resort identification (if the suffix-based
          one failed) is generally useful, but will cause the indexing of
          many bogus extension-less "text" files. Also see
          "systemfilecommand".

   systemfilecommand
          Command to use for guessing the MIME type if the internal
          methods fail. This is ignored on Windows or with Recoll 1.38+ if
          compiled with libmagic enabled (the default). Otherwise, this
          should be a "file -i" workalike. The file path will be added as
          a last parameter to the command line. "xdg-mime" works better
          than the traditional "file" command, and is now the configured
          default (with a hard-coded fallback to "file").

   processwebqueue
          Decide if we process the Web queue. The queue is a directory
          where the Recoll Web browser plugins create the copies of
          visited pages.

   membermaxkbs
          Size limit for archive members. This is passed to the MIME
          handlers in the environment as RECOLL_FILTER_MAXMEMBERKB.

5.3.2. Parameters affecting how we generate terms and organize the index

   indexStripChars
          Decide if we store character case and diacritics in the index.
          If we do, searches sensitive to case and diacritics can be
          performed, but the index will be bigger, and some marginal
          weirdness may sometimes occur. The default is a stripped index.
          When using multiple indexes for a search, this parameter must be
          defined identically for all. Changing the value implies an index
          reset.

   indexStoreDocText
          Decide if we store the documents' text content in the index.
          Storing the text allows extracting snippets from it at query
          time, instead of building them from index position data.

          Newer Xapian index formats have rendered our use of position
          lists unacceptably slow in some cases. The last Xapian index
          format with good performance for the old method is Chert, which
          is the default for Xapian 1.2, is still supported but not the
          default in 1.4, and will be dropped in 1.6.

          The stored document text is translated from its original format
          to UTF-8 plain text, but not stripped of upper-case, diacritics,
          or punctuation signs. Storing it increases the index size by
          10-20% typically, but also allows for nicer snippets, so it may
          be worth enabling it even if not strictly needed for performance
          if you can afford the space.

          The variable only has an effect when creating an index, meaning
          that the xapiandb directory must not exist yet. Its exact effect
          depends on the Xapian version.

          For Xapian 1.4, if the variable was set to 0, we used to use
          the Chert format and not store the text. If the variable was 1,
          Glass was used, and the text stored. We don't do this any more:
          storing the text has proved to be by far the better option, and
          dropping this possibility simplifies the code.

          So now, the index format for a new index is always the default,
          but the variable still controls if the text is stored or not,
          and the abstract generation method. With Xapian 1.4 and later,
          and the variable set to 0, abstract generation may be very slow,
          but this setting may still be useful to save space if you do not
          use abstract generation at all, by using the appropriate setting
          in the GUI, and/or avoiding the Python API or recollq options
          which would trigger it.

   nonumbers
          Decides if terms will be generated for numbers. For example,
          "123", "1.5e6" or "192.168.1.4" would not be indexed if
          nonumbers is set ("value123" would still be). Numbers are often
          quite interesting to search for, and this should probably not
          be set except for special situations, e.g., scientific
          documents with huge amounts of numbers in them, where setting
          nonumbers will reduce the index size. This can only be set for
          a whole index, not for a subtree.

   notermpositions
          Do not store term positions. Term positions allow for phrase and
          proximity searches, but make the index much bigger. In some
          special circumstances, you may want to dispense with them.

   dehyphenate
          Determines if we index "coworker" also when the input is
          "co-worker". This is new in version 1.22, and on by default.
          Setting the variable to off allows restoring the previous
          behaviour.

   indexedpunctuation
          String of UTF-8 punctuation characters to be indexed as words.
          The resulting terms will then be searchable. For example, by
          setting the parameter to "%€" (without the double quotes), you
          would be able to search separately for "100%" or "100€". Note
          that "100%" and "100 %" would be indexed in the same way, as
          these characters act as their own word separators.

   backslashasletter
          Process backslash as a normal letter. This may make sense for
          people wanting to index TeX commands as such but is not of much
          general use.

   underscoreasletter
          Process underscore as normal letter. This makes sense in so many
          cases that one wonders if it should not be the default.

   maxtermlength
          Maximum term length in Unicode characters. Words longer than
          this will be discarded. The default is 40 and used to be
          hard-coded, but it can now be adjusted. You may need an index
          reset if you change the value.

   maxdbdatarecordkbs
          Maximum binary size of a Xapian document data record. The data
          record holds "stored" document metadata fields. A very big size
          usually indicates a document parse error. Xapian has a hard
          limit of around 100MB for this.

   maxdbstoredtextmbs
          Maximum binary size of a document stored text. Xapian has a hard
          limit of around 100MB for the compressed value, but our limit is
          before compression, so there may be some wiggle room.

   nocjk
          Decides if specific East Asian (Chinese, Japanese, Korean)
          character/word splitting is turned off. This will save a small
          amount of CPU if you have no CJK documents. If your document
          base does include such text but you are not interested in
          searching it, setting nocjk may be a significant time and space
          saver.

   cjkngramlen
          This lets you adjust the size of n-grams used for indexing CJK
          text. The default value of 2 is probably appropriate in most
          cases. A value of 3 would allow more precision and efficiency on
          longer words, but the index will be approximately twice as
          large.

   hangultagger
          External tokenizer for Korean Hangul. This allows using a
          language-specific processor for extracting terms from Korean
          text, instead of the generic n-gram term generator. See
          https://www.recoll.org/pages/recoll-korean.html for
          instructions.

   chinesetagger
          External tokenizer for Chinese. This allows using the language
          specific Jieba tokenizer for extracting meaningful terms from
          Chinese text, instead of the generic n-gram term generator. See
          https://www.recoll.org/pages/recoll-chinese.html for
          instructions.

   indexstemminglanguages
          Languages for which to create stemming expansion data. Stemmer
          names can be found by executing "recollindex -l", or this can
          also be set from a list in the GUI. The values are full language
          names, e.g. english, french...
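
          For example, to build stemming expansion data for both English
          and French:
          indexstemminglanguages = english french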

   defaultcharset
          Default character set. This is used for files which do not
          contain a character set definition (e.g.: text/plain). Values
          found inside files, e.g. a "charset" tag in HTML documents, will
          override it. If this is not set, the default character set is
          the one defined by the NLS environment ($LC_ALL, $LC_CTYPE,
          $LANG), or ultimately iso-8859-1 (cp-1252 in fact). If for some
          reason you want a general default which does not match your LANG
          and is not 8859-1, use this variable. This can be redefined for
          any sub-directory.

   unac_except_trans
          A list of characters, encoded in UTF-8, which should be handled
          specially when converting text to unaccented lowercase. For
          example, in Swedish, the letter a with diaeresis has full
          alphabet citizenship and should not be turned into an a. Each
          element in the space-separated list has the special character as
          first element and the translation following. The handling of
          both the lowercase and upper-case versions of a character
          should be specified, as inclusion in the list turns off both
          standard accent and case processing. The value is global and
          affects both indexing and querying. We also convert a few
          confusing Unicode characters (quotes, hyphen) to their ASCII
          equivalent to avoid "invisible" search failures.

          Examples:
          Swedish: unac_except_trans = ää Ää öö Öö üü Üü ßss œoe Œoe æae
          Æae ffff fifi flfl åå Åå ’' ❜' ʼ' ‐-
          German: unac_except_trans = ää Ää öö Öö üü Üü ßss œoe Œoe æae
          Æae ffff fifi flfl ’' ❜' ʼ' ‐-
          French: you probably want to decompose oe and ae, and nobody
          would type a German ß: unac_except_trans = ßss œoe Œoe æae Æae
          ffff fifi flfl ’' ❜' ʼ' ‐-
          The default (for all, until someone protests) follows. These
          decompositions are not performed by unac, but it is unlikely
          that someone would type the composed forms in a search.
          unac_except_trans = ßss œoe Œoe æae Æae ffff fifi flfl ’' ❜'
          ʼ' ‐-

   maildefcharset
          Overrides the default character set for email messages which
          don't specify one. This is mainly useful for readpst (libpst)
          dumps, which are utf-8 but do not say so.

   localfields
          Set fields on all files (usually of a specific fs area). Syntax
          is the usual: name = value ; attr1 = val1 ; [...] The first
          value is empty, so the line needs an initial semi-colon. This
          is useful, e.g., for setting the rclaptg field for application
          selection inside mimeview.
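
          A hypothetical example (the path is for illustration only),
          setting rclaptg for a filesystem area so that a specialised
          viewer can be selected in mimeview:
          [/home/me/gnus-mail]
          localfields = ;rclaptg=gnus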

   testmodifusemtime
          Use mtime instead of ctime to test if a file has been modified.
          The time is used in addition to the size, which is always used.
          Setting this can reduce re-indexing on systems where extended
          attributes are used (by some other application), but not
          indexed, because changing extended attributes only affects
          ctime. Notes: this may prevent detection of change in some
          marginal file rename cases (the target would need to have the
          same size and mtime); you should probably also set
          noxattrfields to 1 in this case, unless you still prefer to
          perform xattr indexing, for example if the local file update
          pattern makes it of value (in general, there is a risk of pure
          extended attribute updates without file modification going
          undetected). Perform a full index reset after changing this.

   noxattrfields
          Disable extended attributes conversion to metadata fields. This
          probably needs to be set if testmodifusemtime is set.

   metadatacmds
          Define commands to gather external metadata, e.g. tmsu tags.
          There can be several entries, separated by semi-colons, each
          defining which field name the data goes into and the command to
          use. Don't forget the initial semi-colon. All the field names
          must be different. You can use aliases in the "field" file if
          necessary. As a not too pretty hack conceded to convenience, any
          field name beginning with "rclmulti" will be taken as an
          indication that the command returns multiple field values inside
          a text blob formatted as a recoll configuration file ("fieldname
          = fieldvalue" lines). The rclmultixx name will be ignored, and
          field names and values will be parsed from the data. Example:
          metadatacmds = ; tags = tmsu tags %f; rclmulti1 = cmdOutputsConf
          %f

5.3.3. Parameters affecting where and how we store things

   cachedir
          Top directory for Recoll data. Recoll data directories are
          normally located relative to the configuration directory (e.g.
          ~/.recoll/xapiandb, ~/.recoll/mboxcache). If "cachedir" is set,
          the directories are stored under the specified value instead
          (e.g. if cachedir is ~/.cache/recoll, the default dbdir would be
          ~/.cache/recoll/xapiandb). This affects dbdir, webcachedir,
          mboxcachedir, aspellDicDir, which can still be individually
          specified to override cachedir. Note that if you have multiple
          configurations, each must have a different cachedir, there is no
          automatic computation of a subpath under cachedir.

   maxfsoccuppc
          Maximum file system occupation over which we stop indexing. The
          value is a percentage, corresponding to what the "Capacity" df
          output column shows. The default value is 0, meaning no
          checking. This parameter is only checked when the indexer
          starts, it will not change the behaviour of a running process.

   dbdir
          Xapian database directory location. This will be created on
          first indexing. If the value is not an absolute path, it will be
          interpreted as relative to cachedir if set, or the configuration
          directory (-c argument or $RECOLL_CONFDIR). If nothing is
          specified, the default is then ~/.recoll/xapiandb/

   idxstatusfile
          Name of the scratch file where the indexer process updates its
          status. Default: idxstatus.txt inside the configuration
          directory.

   mboxcachedir
          Directory location for storing mbox message offsets cache files.
          This is normally "mboxcache" under cachedir if set, or else
          under the configuration directory, but it may be useful to share
          a directory between different configurations.

   mboxcacheminmbs
          Minimum mbox file size over which we cache the offsets. There is
          really no sense in caching offsets for small files. The default
          is 5 MB.

   mboxmaxmsgmbs
          Maximum mbox member message size in megabytes. Size over which
          we assume that the mbox format is bad or we misinterpreted it,
          at which point we just stop processing the file.

   webcachedir
          Directory where we store the archived web pages after they are
          processed. This is only used by the Web history indexing code.
          Note that this is different from webdownloadsdir which tells the
          indexer where the web pages are stored by the browser, before
          they are indexed and stored into webcachedir. Default:
          cachedir/webcache if cachedir is set, else
          $RECOLL_CONFDIR/webcache

   webcachemaxmbs
          Maximum size in MB of the Web archive. This is only used by the
          web history indexing code. Default: 40 MB. Reducing the size
          will not physically truncate the file.

   webqueuedir
          The path to the Web indexing queue. This used to be hard-coded
          in the old plugin as ~/.recollweb/ToIndex so there would be no
          need or possibility to change it, but the WebExtensions plugin
          now downloads the files to the user Downloads directory, and a
          script moves them to webqueuedir. The script reads this value
          from the config so it has become possible to change it.

   webdownloadsdir
          The path to the browser add-on download directory. This tells
          the indexer where the Web browser add-on stores the web page
          data. The data is then moved by a script to webqueuedir, then
          processed, and finally stored in webcachedir for future
          previews.

   webcachekeepinterval
          Page recycle interval. By default, only one instance of a URL
          is kept in the cache. This can be changed by setting this to a
          value determining at what frequency we keep multiple instances
          ("day", "week", "month", "year"). Note that increasing the
          interval will not erase existing entries.

   aspellDicDir
          Aspell dictionary storage directory location. The aspell
          dictionary (aspdict.(lang).rws) is normally stored in the
          directory specified by cachedir if set, or under the
          configuration directory.

   filtersdir
          Directory location for executable input handlers. If
          RECOLL_FILTERSDIR is set in the environment, we use it instead.
          Defaults to $prefix/share/recoll/filters. Can be redefined for
          subdirectories.

   iconsdir
          Directory location for icons. The only reason to change this
          would be if you want to change the icons displayed in the result
          list. Defaults to $prefix/share/recoll/images

5.3.4. Parameters affecting indexing performance and resource usage

   idxflushmb
          Threshold (megabytes of new data) where we flush from memory to
          disk index. Setting this allows some control over memory usage
          by the indexer process. A value of 0 means no explicit flushing,
          which lets Xapian perform its own thing, meaning flushing every
          $XAPIAN_FLUSH_THRESHOLD documents created, modified or deleted:
          as memory usage depends on average document size, not only
          document count, the Xapian approach is not very useful, and
          you should let Recoll manage the flushes. The compiled-in
          value is 0. The configured default value (from this file) is
          now 50 MB, and should be ok in many cases. You can set it as
          low as
          10 to conserve memory, but if you are looking for maximum speed,
          you may want to experiment with values between 20 and 200. In my
          experience, values beyond this are always counterproductive. If
          you find otherwise, please drop me a note.

   filtermaxseconds
          Maximum external filter execution time in seconds. Default 1200
          (20 minutes). Set to 0 for no limit. This is mainly to avoid
          infinite loops in PostScript files (loop.ps).

   filtermaxmbytes
          Maximum virtual memory space for filter processes
          (setrlimit(RLIMIT_AS)), in megabytes. Note that this includes
          any mapped libs (there is no reliable Linux way to limit the
          data space only), so we need to be a bit generous here.
          Anything over 2000 will be ignored on 32-bit machines. The high
          default value is needed because of Java-based handlers (pdftk)
          which need a lot of VM (most of it text), especially pdftk when
          executed from Python rclpdf.py. You can use a much lower value
          if you don't need Java.

   thrQSizes
          Task queue depths for each stage and threading configuration
          control. There are three internal queues in the indexing
          pipeline stages (file data extraction, terms generation, index
          update). This parameter defines the queue depths for each stage
          (three integer values). In practice, deep queues have not been
          shown to increase performance. The first value is also used to
          control threading autoconfiguration or disabling multithreading.
          If the first queue depth is set to 0 Recoll will set the queue
          depths and thread counts based on the detected number of CPUs.
          The arbitrarily chosen values are as follows (depth,nthread). 1
          CPU -> no threading. Less than 4 CPUs: (2, 2) (2, 2) (2, 1).
          Less than 6: (2, 4), (2, 2), (2, 1). Else (2, 5), (2, 3), (2,
          1). If the first queue depth is set to -1, multithreading will
          be disabled entirely. The second and third values are ignored in
          both these cases.

   thrTCounts
          Number of threads used for each indexing stage. If the first
          entry in thrQSizes is not 0 or -1, these three values define the
          number of threads used for each stage (file data extraction,
          term generation, index update). It makes no sense to use a value
          other than 1 for the last stage because updating the Xapian
          index is necessarily single-threaded (and protected by a mutex).
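
          As a sketch only (the values are for illustration, not a
          recommendation), an explicit threading configuration for a
          multi-core machine could look like this:
          thrQSizes = 2 2 2
          thrTCounts = 4 2 1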

   thrTmpDbCnt
          Number of temporary indexes used during incremental or full
          indexing. If not set to zero, this defines how many temporary
          indexes we use during indexing. These temporary indexes are
          merged into the main one at the end of the operation. Using
          multiple indexes and a final merge can significantly improve
          indexing performance when the single-threaded Xapian index
          updates become a bottleneck. How useful this is depends on the
          type of input and CPU. See the manual for more details.

   suspendonbattery
          Suspend the real time indexing when the system runs on battery.
          The indexer will wait for a return on AC and reexec itself when
          it happens.

5.3.5. Miscellaneous parameters

   loglevel
          Log file verbosity 1-6. A value of 2 will print only errors and
          warnings. 3 will print information like document updates, 4 is
          quite verbose and 6 very verbose.

   logfilename
          Log file destination. Use "stderr" (default) to write to the
          console.

   idxloglevel
          Override loglevel for the indexer.

   idxlogfilename
          Override logfilename for the indexer.

   helperlogfilename
          Destination file for external helpers standard error output. The
          external program error output is left alone by default, e.g.
          going to the terminal when the recoll[index] program is executed
          from the command line. Use /dev/null or a file inside a
          non-existent directory to completely suppress the output.

   daemloglevel
          Override loglevel for the indexer in real time mode. The default
          is to use the idx... values if set, else the log... values.

   daemlogfilename
          Override logfilename for the indexer in real time mode. The
          default is to use the idx... values if set, else the log...
          values.

   pyloglevel
          Override loglevel for the python module.

   pylogfilename
          Override logfilename for the python module.

   idxnoautopurge
          Do not purge data for deleted or inaccessible files. This can
          be overridden by recollindex command line options, and may be
          useful if some parts of the document set may predictably be
          inaccessible at times, so that you would only run the purge
          after making sure that everything is there.

   orgidxconfdir
          Original location of the configuration directory. This is used
          exclusively for movable datasets. Locating the configuration
          directory inside the directory tree makes it possible to provide
          automatic query time path translations once the data set has
          moved (for example, because it has been mounted on another
          location).

   curidxconfdir
          Current location of the configuration directory. Complement
          orgidxconfdir for movable datasets. This should be used if the
          configuration directory has been copied from the dataset to
          another location, either because the dataset is readonly and an
          r/w copy is desired, or for performance reasons. This records
          the original moved location before copy, to allow path
          translation computations. For example if a dataset originally
          indexed as "/home/me/mydata/config" has been mounted to
          "/media/me/mydata", and the GUI is running from a copied
          configuration, orgidxconfdir would be "/home/me/mydata/config",
          and curidxconfdir (as set in the copied configuration) would be
          "/media/me/mydata/config".

   idxrundir
          Indexing process current directory. The input handlers sometimes
          leave temporary files in the current directory, so it makes
          sense to have recollindex chdir to some temporary directory. If
          the value is empty, the current directory is not changed. If the
          value is (literal) tmp, we use the temporary directory as set by
          the environment (RECOLL_TMPDIR else TMPDIR else /tmp). If the
          value is an absolute path to a directory, we go there.

   checkneedretryindexscript
          Script used to heuristically check if we need to retry indexing
          files which previously failed. The default script checks the
          modified dates on /usr/bin and /usr/local/bin. A relative path
          will be looked up in the filters dirs, then in the path. Use an
          absolute path to do otherwise.

   recollhelperpath
          Additional places to search for helper executables. This is
          used, e.g., on Windows by the Python code, and on Mac OS by the
          bundled recoll.app (because I could find no reliable way to
          tell launchd to set the PATH). Use ":" as the entry separator
          for Mac and Unix-like systems; ";" is for Windows only.
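
          A hypothetical Windows example (the paths are for illustration
          only), using ";" as the separator:
          recollhelperpath = C:/Recoll-helpers/bin;C:/Tools/poppler/bin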

   idxabsmlen
          Length of abstracts we store while indexing. Recoll stores an
          abstract for each indexed file. The text can come from an actual
          "abstract" section in the document or will just be the beginning
          of the document. It is stored in the index so that it can be
          displayed inside the result lists without decoding the original
          file. The idxabsmlen parameter defines the size of the stored
          abstract. The default value is 250 bytes. The search interface
          gives you the choice to display this stored text or a synthetic
          abstract built by extracting text around the search terms. If
          you always prefer the synthetic abstract, you can reduce this
          value and save a little space.

   idxmetastoredlen
          Truncation length of stored metadata fields. This does not
          affect indexing (the whole field is processed anyway), just the
          amount of data stored in the index for the purpose of displaying
          fields inside result lists or previews. The default value is 150
          bytes which may be too low if you have custom fields.

   idxtexttruncatelen
          Truncation length for all document texts. Only index the
          beginning of documents. This is not recommended except if you
          are sure that the interesting keywords are at the top and have
          severe disk space issues.

   idxsynonyms
          Name of the index-time synonyms file. This is only used to issue
          multi-word single terms for multi-word synonyms so that phrase
          and proximity searches work for them (ex: applejack "apple
          jack"). The feature will only have an effect for querying if the
          query-time and index-time synonym files are the same.

   idxniceprio
          "nice" process priority for the indexing processes. Default: 19
          (lowest) Appeared with 1.26.5. Prior versions were fixed at 19.

   noaspell
          Disable aspell use. The aspell dictionary generation takes time,
          and some combinations of aspell version, language, and local
          terms, result in aspell crashing, so it sometimes makes sense to
          just disable the thing.

   aspellLanguage
          Language definitions to use when creating the aspell dictionary.
          The value must match a set of aspell language definition files.
          You can type "aspell dicts" to see a list The default if this is
          not set is to use the NLS environment to guess the value. The
          values are the 2-letter language codes (e.g. "en", "fr"...)

   aspellAddCreateParam
          Additional option and parameter to aspell dictionary creation
          command. Some aspell packages may need an additional option
          (e.g. on Debian Jessie: --local-data-dir=/usr/lib/aspell). See
          Debian bug 772415.

   aspellKeepStderr
          Set this to have a look at aspell dictionary creation errors.
          There are always many, so this is mostly for debugging.

   monauxinterval
          Auxiliary database update interval. The real time indexer only
          updates the auxiliary databases (stemdb, aspell) periodically,
          because it would be too costly to do it for every document
          change. The default period is one hour.

   monixinterval
          Minimum interval (seconds) between processings of the indexing
          queue. The real time indexer does not process each event when it
          comes in, but lets the queue accumulate, to diminish overhead
          and to aggregate multiple events affecting the same file.
          Default 30 seconds.

   mondelaypatterns
          Timing parameters for the real time indexing. Definitions for
          files which get a longer delay before reindexing is allowed.
          This is for fast-changing files, that should only be reindexed
          once in a while. A list of wildcardPattern:seconds pairs. The
          patterns are matched with fnmatch(pattern, path, 0). You can
          quote entries containing white space with double quotes (quote
          the whole entry, not the pattern). The default is empty.
          Example: mondelaypatterns = *.log:20 "*with spaces.*:30"

   monioniceclass
          ionice class for the indexing process. Despite the misleading
          name, and on platforms where this is supported, this affects all
          indexing processes, not only the real time/monitoring ones. The
          default value is 3 (use lowest "Idle" priority).

   monioniceclassdata
          ionice class level parameter if the class supports it. The
          default is empty, as the default "Idle" class has no levels.

5.3.6. Query-time parameters (no impact on the index)

   idxlocalguisettings
          Store some GUI parameters locally to the index. GUI settings are
          normally stored in a global file, valid for all indexes. Setting
          this parameter will make some settings, such as the result table
          setup, specific to the index.

   autodiacsens
          Auto-trigger diacritics sensitivity (raw index only). If the
          index is not stripped, decide if we automatically trigger
          diacritics sensitivity if the search term has accented
          characters (not in unac_except_trans). Else you need to use the
          query language and the "D" modifier to specify diacritics
          sensitivity. Default is no.

   autocasesens
          Auto-trigger case sensitivity (raw index only). If the index is
          not stripped (see indexStripChars), decide if we automatically
          trigger character case sensitivity if the search term has
          upper-case characters in any but the first position. Else you
          need to use the query language and the "C" modifier to specify
          character-case sensitivity. Default is yes.

   maxTermExpand
          Maximum query expansion count for a single term (e.g.: when
          using wildcards). This only affects queries, not indexing. We
          used to not limit this at all (except for filenames where the
          limit was too low at 1000), but it is unreasonable with a big
          index. Default 10000.

   maxXapianClauses
          Maximum number of clauses we add to a single Xapian query. This
          only affects queries, not indexing. In some cases, the result of
          term expansion can be multiplicative, and we want to avoid
          eating all the memory. Default 50000.

   snippetMaxPosWalk
          Maximum number of positions we walk while populating a snippet
          for the result list. The default of 1,000,000 may be
          insufficient for very big documents, the consequence would be
          snippets with possibly meaning-altering missing words.

   thumbnailercmd
          Command to use for generating thumbnails. If set, this should be
          a path to a command or script followed by its constant
          arguments. Four arguments will be appended before execution: the
          document URL, MIME type, target icon SIZE (e.g. 128), and output
          file PATH. The command should generate a thumbnail from these
          values. E.g. if the MIME is video, a script could use:
          ffmpegthumbnailer -iURL -oPATH -sSIZE.

   stemexpandphrases
          Default to applying stem expansion to phrase terms. Recoll
          normally does not apply stem expansion to terms inside phrase
          searches. Setting this parameter will change the default
          behaviour to expanding terms inside phrases. If set, you can
          use an "l" modifier to disable expansion for a specific
          instance.

   autoSpellRarityThreshold
          Inverse of the ratio of term occurrence to total db terms over
          which we look for spell neighbours for automatic query
          expansion. When a term is very uncommon, we may (depending on
          user choice)
          look for spelling variations which would be more common and
          possibly add them to the query.

   autoSpellSelectionThreshold
          Ratio of spell neighbour frequency over user input term
          frequency beyond which we include the neighbour in the query.
          When a term has been selected for spelling expansion because of
          its rarity, we only include spelling neighbours which are more
          common by this ratio.

   kioshowsubdocs
          Show embedded document results in KDE dolphin/kio and krunner.
          Embedded documents may clutter the results and are not always
          easily usable from the kio or krunner environment. Setting this
          variable will restrict the results to standalone documents.

5.3.7. Parameters for the PDF handler

   pdfocr
          Attempt OCR of PDF files with no text content. This can be
          defined in subdirectories. The default is off because OCR is so
          very slow.
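
          For example (the path is hypothetical), OCR could be enabled
          for a directory of scanned documents only:
          pdfocr = 0
          [/home/me/scanned]
          pdfocr = 1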

   pdfoutline
          Extract outlines and bookmarks from PDF documents (needs
          pdftohtml). This is not enabled by default because it is rarely
          needed, and the extra command takes a little time.

   pdfattach
          Enable PDF attachment extraction by executing pdfdetach (if
          available). This used to be disabled by default because it used
          pdftk. We now use pdfdetach, which is part of poppler-utils and
          fast.

   pdfextrameta
          Extract text from selected XMP metadata tags. This is a
          space-separated list of qualified XMP tag names. Each element
          can also include a translation to a Recoll field name, separated
          by a "|" character. If the second element is absent, the tag
          name is used as the Recoll field names. You will also need to
          add specifications to the "fields" file to direct processing of
          the extracted data.
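
          An illustrative value (the tag and field names are examples
          only), extracting the Dublin Core subject under its own name
          and mapping another tag to a custom field:
          pdfextrameta = dc:subject xmp:CreatorTool|creatortool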

   pdfextrametafix
          Define name of XMP field editing script. This defines the name
          of a script to be loaded for editing XMP field values. The
          script should define a "MetaFixer" class with a metafix() method
          which will be called with the qualified tag name and value of
          each selected field, for editing or erasing. A new instance is
          created for each document, so that the object can keep state
          for, e.g. eliminating duplicate values.

5.3.8. Parameters for the ZIP file handler

   zipUseSkippedNames
          Use skippedNames inside Zip archives. Fetched directly by the
          rclzip.py handler. Skip the patterns defined by skippedNames
          inside Zip archives. Can be redefined for subdirectories. See
          https://www.recoll.org/faqsandhowtos/FilteringOutZipArchiveMembe
          rs.html

   zipSkippedNames
          Space-separated list of wildcard expressions for names that
          should be ignored inside zip archives. This is used directly by
          the zip handler. If zipUseSkippedNames is not set,
          zipSkippedNames defines the patterns to be skipped inside
          archives. If zipUseSkippedNames is set, the two lists are
          concatenated and used. Can be redefined for subdirectories. See
          https://www.recoll.org/faqsandhowtos/FilteringOutZipArchiveMembe
          rs.html
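
          An illustrative value (the patterns and path are hypothetical),
          skipping object files and images inside archives found under a
          given directory:
          [/home/me/archives]
          zipSkippedNames = *.o *.png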

   zipMetaEncoding
          File path encoding. Needs Python 3.11+. Set to "detect" for
          using chardet. This is useful for non-standard zip files where
          the metadata is neither UTF-8 (indicated by a file flag), nor
          CP437 (default). The parameter can be set for specific subtrees.
          You need to install the Python3 "chardet" package if the value
          is set to "detect".

5.3.9. Parameters for the Org mode handler

   orgmodesubdocs
          Index org-mode level 1 sections as separate sub-documents. This
          is the default. If set to false, org-mode files will be indexed
          as plain text.

5.3.10. Parameters for the Thunderbird mbox handler

   mhmboxquirks
          Enable thunderbird/mozilla-seamonkey mbox format quirks. Set
          this for the directory(ies) where the email mbox files are
          stored.

5.3.11. Parameters for OCR processing

   imgocr
          Tell the non-default Python image handler to run OCR. See the
          PDF section for PDF OCR. The image OCR also needs mimeconf
          changes. See the manual. imgocr can be defined for
          subdirectories.

   ocrprogs
          OCR modules to try. The top OCR script will try to load the
          corresponding modules in order and use the first which reports
          being capable of performing OCR on the input file. Modules for
          tesseract (tesseract) and ABBYY FineReader (abbyy) are present
          in the standard distribution. For compatibility with the
          previous version, if this is not defined at all, the default
          value is "tesseract". Use an explicit empty value if needed. A
          value of "abbyy tesseract" will try everything.

   ocrcachedir
          Location for caching OCR data. The default if this is empty or
          undefined is to store the cached OCR data under
          $RECOLL_CONFDIR/ocrcache.

   tesseractlang
          Language to assume for tesseract OCR. Important for improving
          the OCR accuracy. This can also be set through the contents of a
          file in the currently processed directory. See the
          rclocrtesseract.py script. Example values: eng, fra... See the
          tesseract documentation.

   tesseractcmd
          Path for the tesseract command. Do not quote. This is mostly
          useful on Windows, or for specifying a non-default tesseract
          command. E.g. on Windows: tesseractcmd =
          C:/ProgramFiles(x86)/Tesseract-OCR/tesseract.exe

   abbyylang
          Language to assume for abbyy OCR. Important for improving the
          OCR accuracy. This can also be set through the contents of a
          file in the currently processed directory. See the
          rclocrabbyy.py script. Typical values: English, French... See
          the ABBYY documentation.

   abbyyocrcmd
          Path for the abbyy command. The ABBYY directory is usually not
          in the PATH, so you should set this.

5.3.12. Parameters for running speech to text conversion

   speechtotext
          Activate speech to text conversion. The only possible value at
          the moment is "whisper", for using the OpenAI whisper program.

   sttmodel
          Name of the whisper model to use.

   sttdevice
          Name of the device to be used by whisper.
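
          A minimal sketch, assuming the usual whisper model and device
          names (check the whisper documentation for the values
          appropriate to your installation):
          speechtotext = whisper
          sttmodel = base
          sttdevice = cpu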

5.4. The fields file

   This file contains information about dynamic fields handling in Recoll.
   Some very basic fields have hard-wired behaviour, and, mostly, you
   should not change the original data inside the fields file. But you can
   create custom fields fitting your data and handle them just as if they
   were native ones.

   The fields file has several sections, which each define an aspect of
   fields processing. Quite often, you'll have to modify several sections
   to obtain the desired behaviour.

   We will only give a short description here, you should refer to the
   comments inside the default file for more detailed information.

   Field names should be lowercase alphabetic ASCII.

   [prefixes]
          A field becomes indexed (searchable) by having a prefix defined
          in this section. There is a more complete explanation of what
          prefixes are, and of those used by a standard Recoll
          installation, elsewhere in the manual. In a nutshell: extension
          prefixes should be all caps, begin with XY, and be short. E.g.
          XYMFLD.

   [values]
          Fields listed in this section will be stored as Xapian values
          inside the index. This makes them available for range queries,
          making it possible to filter results according to the field
          value. This feature currently supports string and integer
          data. See the comments in the file for more detail.

   [stored]
          A field becomes stored (displayable inside results) by having
          its name listed in this section (typically with an empty value).

   [aliases]
          This section defines lists of synonyms for the canonical names
          used inside the [prefixes] and [stored] sections.

   [queryaliases]
          This section also defines aliases for the canonical field
          names, with the difference that the substitution will only be
          used at query time, avoiding any possibility that the value
          would pick up random metadata from documents.

   handler-specific sections
          Some input handlers may need specific configuration for handling
          fields. Only the email message handler currently has such a
          section (named [mail]). It allows indexing arbitrary email
          headers in addition to the ones indexed by default. Other such
          sections may appear in the future.

   Here follows a small example of a personal fields file. This would
   extract a specific email header and use it as a searchable field, with
   data displayable inside result lists. (Side note: as the email handler
   does no decoding on the values, only plain ASCII headers can be
   indexed, and only the first occurrence will be used for headers that
   occur several times).
[prefixes]
# Index mailmytag contents (with the given prefix)
mailmytag = XMTAG

[stored]
# Store mailmytag inside the document data record (so that it can be
# displayed - as %(mailmytag) - in result lists).
mailmytag =

[queryaliases]
filename = fn
containerfilename = cfn

[mail]
# Extract the X-My-Tag mail header, and use it internally with the
# mailmytag field name
x-my-tag = mailmytag

5.4.1. Extended attributes in the fields file

   Recoll processes user extended file attributes as documents fields by
   default.

   Attributes are processed as fields of the same name, after removing the
   user prefix on Linux.

   The [xattrtofields] section of the fields file allows specifying
   translations from extended attributes names to Recoll field names.

   Name translations are set as xattrname = fieldname. They are
   case-sensitive. E.g. the following would map an extended attribute
   named "tags" into the "keywords" field: tags = keywords.

   Entering an empty translation will disable any use of the attribute.
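
   For illustration, the following section would map the "tags" attribute
   to the "keywords" field and disable use of a (hypothetical)
   "mycustomattr" attribute:
[xattrtofields]
tags = keywords
mycustomattr =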

   The values from the extended attributes will not replace the data found
   from equivalent fields inside the document, instead they are
   concatenated.

   Special case: an extended attribute named modificationdate will set the
   dmtime field (document date) only if it is not set by an internal
   document field (e.g. email Date:).

5.5. The mimemap file

   mimemap specifies the file name extension to MIME type mappings.

   For file names without an extension, or with an unknown one, recent
   Recoll versions will use libmagic. Older versions would execute a
   system command (file -i, or xdg-mime) to determine the MIME type
   (this can be switched off, or the command changed, inside the main
   configuration file).

   All extension values in mimemap must be entered in lower case. File
   name extensions are lower-cased for comparison during indexing,
   meaning that an upper case mimemap entry will never be matched.

   The mappings can be specified on a per-subtree basis, which may be
   useful in some cases. Example: okular notes have a .xml extension but
   should be handled specially, which is possible because they are usually
   all located in one place. Example:
[~/.kde/share/apps/okular/docdata]
.xml = application/x-okular-notes

   The recoll_noindex mimemap variable has been moved to recoll.conf and
   renamed to noContentSuffixes, while keeping the same function, as of
   Recoll version 1.21. For older Recoll versions, see the documentation
   for noContentSuffixes but use recoll_noindex in mimemap.

5.6. The mimeconf file

   The main purpose of the mimeconf file is to specify how the different
   MIME types are handled for indexing. This is done in the [index]
   section, which should not be modified casually. See the comments in the
   file.

   The file also contains other definitions which affect the query
   language and the GUI, and which, in retrospect, should have been stored
   elsewhere.

   The [icons] section allows you to change the icons which are displayed
   by the recoll GUI in the result lists (the values are the basenames of
   the PNG images inside the iconsdir directory, which is itself defined
   in recoll.conf).

   The [categories] section defines the groupings of MIME types into
   categories as used when adding an rclcat clause to a [314]query
   language query. rclcat clauses are also used by the default guifilters
   buttons in the GUI (see next).

   The filter controls appear at the top of the recoll GUI, either as
   checkboxes just above the result list, or as a dropbox in the tool
   area.

   By default, they are labeled: media, message, other, presentation,
   spreadsheet and text, and each maps to a document category. This is
   determined in the [guifilters] section, where each control is defined
   by a variable naming a query language fragment.

   A simple example will hopefully make things clearer.
[guifilters]

Big Books = dir:"~/My Books" size>10K
My Docs = dir:"~/My Documents"
Small Books = dir:"~/My Books" size<10K
System Docs = dir:/usr/share/doc

   The above definition would create four filter checkboxes, labelled Big
   Books, My Docs, etc.

   The text after the equal sign must be a valid query language fragment,
   and, when the button is checked, it will be combined with the rest of
   the query with an AND conjunction.

   Any name text before a colon character will be erased in the display,
   but used for sorting. You can use this to display the checkboxes in any
   order you like. For example, the following would do exactly the same as
   above, but ordering the checkboxes in the reverse order.
[guifilters]

d:Big Books = dir:"~/My Books" size>10K
c:My Docs = dir:"~/My Documents"
b:Small Books = dir:"~/My Books" size<10K
a:System Docs = dir:/usr/share/doc

   As you may have guessed, the default [guifilters] section looks like:
[guifilters]
text = rclcat:text
spreadsheet = rclcat:spreadsheet
presentation = rclcat:presentation
media = rclcat:media
message = rclcat:message
other = rclcat:other

5.7. The mimeview file

   mimeview specifies which programs are started when you click on an Open
   link in a result list. E.g.: HTML is normally displayed using firefox,
   but you may prefer Konqueror; your openoffice.org program might be
   named oofice instead of openoffice; etc.

   Changes to this file can be done by direct editing, or through the
   recoll GUI preferences dialog.

   If Use desktop preferences to choose document editor is checked in the
   Recoll GUI preferences, all mimeview entries will be ignored except the
   one labelled application/x-all (which is set to use xdg-open by
   default).

   In this case, the xallexcepts top level variable defines a list of MIME
   type exceptions which will be processed according to the local entries
   instead of being passed to the desktop. This is so that specific Recoll
   options such as a page number or a search string can be passed to
   applications that support them, such as the evince viewer.

   As for the other configuration files, the normal usage is to have a
   mimeview inside your own configuration directory, with just the
   non-default entries, which will override those from the central
   configuration file.

   All viewer definition entries must be placed under a [view] section.

   The keys in the file are normally MIME types. You can add an
   application tag to specialize the choice for an area of the filesystem
   (using a localfields specification in mimeconf). The syntax for the key
   is mimetype|tag

   The nouncompforviewmts entry, (placed at the top level, outside of the
   [view] section), holds a list of MIME types that should not be
   uncompressed before starting the viewer (if they are found compressed,
   e.g.: mydoc.doc.gz).

   The right side of each assignment holds a command to be executed for
   opening the file. The following substitutions are performed:
     * %D.  Document date
     * %f.  File name. This may be the name of a temporary file if it was
       necessary to create one (e.g.: to extract a subdocument from a
       container).
     * %i.  Internal path, for subdocuments of containers. The format
       depends on the container type. If this appears in the command line,
       Recoll will not create a temporary file to extract the subdocument,
       expecting the called application (possibly a script) to be able to
       handle it.
     * %M. MIME type
     * %p. Page index. Only significant for a subset of document types,
       currently only PDF, Postscript and DVI files. If it is set, a
       significant term will be chosen in the query, and %p will be
       substituted with the first page where the term appears. Can be used
       to start the editor at the right page for a match or snippet.
     * %l. Line number. Only significant for document types with relevant
       line breaks, mostly text/plain and analogs. If it is set, a
       significant term will be chosen in the query, and %l will be
       substituted with the first line where the term appears.
     * %s. Search term. The value will only be set for documents with
       indexed page or line numbers and if %p or %l is also used. The
       value will be one of the matched search terms. It would allow
       pre-setting the value in the "Find" entry inside Evince for
       example, for easy highlighting of the term.
     * %u. Url.

   In addition to the predefined values above, all strings like
   %(fieldname) will be replaced by the value of the field named fieldname
   for the document. This could be used in combination with field
   customisation to help with opening the document.
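
   As an illustration (this is not necessarily the stock entry, and it
   assumes an evince version which accepts these options), a [view] entry
   using the page and search term substitutions could look like the
   following:
[view]
application/pdf = evince --page-index=%p --find=%s %f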

5.8. The ptrans file

   ptrans specifies query-time path translations. These can be useful in
   [315]multiple cases.

   The file has a section for any index which needs translations, either
   the main one or additional query indexes. The sections are named with
   the Xapian index directory names. No slash character should exist at
   the end of the paths (all comparisons are textual). An example should
   make things sufficiently clear
[/home/me/.recoll/xapiandb]
/this/directory/moved = /to/this/place

[/path/to/additional/xapiandb]
/server/volume1/docdir = /net/server/volume1/docdir
/server/volume2/docdir = /net/server/volume2/docdir

5.9. Examples of configuration adjustments

5.9.1. Adding an external viewer for a non-indexed type

   Imagine that you have some kind of file which does not have indexable
   content, but for which you would like to have a functional Open link in
   the result list (when found by file name). The file names end in .blob
   and can be displayed by application blobviewer.

   You need two entries in the configuration files for this to work:
     * In $RECOLL_CONFDIR/mimemap (typically ~/.recoll/mimemap), add the
       following line:
.blob = application/x-blobapp
       Note that the MIME type is made up here, and you could call it
       diesel/oil just the same.
     * In $RECOLL_CONFDIR/mimeview under the [view] section, add:
application/x-blobapp = blobviewer %f
       We are supposing that blobviewer wants a file name parameter here;
       you would use %u if it liked URLs better.

   If you just wanted to change the application used by Recoll to display
   a MIME type which it already knows, you would just need to edit
   mimeview. The entries you add in your personal file override those in
   the central configuration, which you do not need to alter. mimeview can
   also be modified from the GUI.

5.9.2. Adding indexing support for a new file type

   Let us now imagine that the above .blob files actually contain
   indexable text and that you know how to extract it with a command line
   program. Getting Recoll to index the files is easy. You need to perform
   the above alteration, and also to add data to the mimeconf file
   (typically in ~/.recoll/mimeconf):
     * Under the [index] section, add the following line (more about the
       rclblob indexing script later):
application/x-blobapp = exec rclblob
       Or if the files are mostly text and you don't need to process them
       for indexing:
application/x-blobapp = internal text/plain
     * Under the [icons] section, you should choose an icon to be
       displayed for the files inside the result lists. Icons are normally
       64x64 pixels PNG files which live in /usr/share/recoll/images.
     * Under the [categories] section, you should add the MIME type where
       it makes sense (you can also create a category). Categories may be
       used for filtering in advanced search.

   The rclblob handler should be an executable program or script which
   exists inside /usr/share/recoll/filters. It will be given a file name
   as argument and should output the text or html contents on the standard
   output.

   The [316]filter programming section describes in more detail how to
   write an input handler.

Part I. Appendices

Appendix A. Processing of wild card and other special characters

   Recoll is primarily designed to search for natural language words, and
   the general rule is that non-alphanumeric characters are treated as
   white space (word separators).

   However, a number of ASCII characters receive special treatment. The
   details are described in the following.

A.1. Words and spans

   Some important searchable text elements contain non-alphanumeric
   characters, for example, email addresses (jfd@recoll.org), proper names
   (O'Brien) or internet addresses (192.168.4.1).

   If we treat the special characters as white space in this situation,
   the only way to search for these terms with a reasonable degree of
   precision would be to use phrase searches ("jf dockes org").

   However, phrase searches need a lot of computation and are generally
   slower. This was especially true with older Xapian versions.

   Recoll has special processing for these elements, designated as spans.
   The corresponding linkage characters will be designated as span glue in
   the following.

   When indexing a span like jfd@recoll.org, Recoll generates both regular
   individual terms (jfd, recoll, org) and multiword terms linked by span
   glue: jfd@recoll.org, jfd@recoll, recoll.org.

   When searching, only the larger term (complete span: jfd@recoll.org) is
   used, so that Xapian executes a regular single-term search instead of a
   phrase one.

A.2. Special ASCII characters during indexing

A.2.1. Characters with hard-coded processing

   - and + have special treatment in numbers or terms like C++, else they
   are processed as span glue.

   . and ' are always processed as span glue.

   # is processed as a letter at the beginning (hashtag) or end (c#) of a
   word, else as white space.

   The underscore (_) is processed as span glue except if the
   underscoreasletter index configuration parameter is set, in which case
   it is treated as a regular letter.

A.2.2. Characters generally treated as white space

   !"$%&(),/:;<=>^\`{|}~ and wild card expression characters *[]? are
   generally treated as white space while indexing, except if they are
   included in the indexedpunctuation index configuration variable.

   Elements of indexedpunctuation are treated as single terms, which makes
   it possible to match, e.g., 93% precisely with a 93% query (or with
   just 93 if preferred).
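
   For example (the value is illustrative), adding the following line to
   recoll.conf would cause % and $ to be indexed instead of being treated
   as white space:
indexedpunctuation = %$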

A.2.3. Backslash

   \ is treated as a letter if backslashasletter is set, else treated as
   other punctuation according to indexedpunctuation. backslashasletter
   was broken until Recoll version 1.42.2.

A.3. Special ASCII characters at search time

   Slightly different rules apply when querying, for example because of
   the special characters used by the Query language, or because of wild
   card expansion.

A.3.1. Query language special characters

   "():=<> and .. have special meaning for the query language. There is no
   way to escape them. This is generally not a problem because they are
   all treated as white space during indexing, except if included in
   indexedpunctuation. In the latter case, it is still possible to search
   for them through the All terms or Any term search modes or by using the
   Advanced search GUI dialog.

A.3.2. Wild card characters

   When querying, wild card characters generally cause an expansion of the
   term they are found in. Until Recoll version 1.42.2, there was no way
   to escape them.

   As of Recoll 1.42.2, wild card characters can be escaped with a
   backslash in File name search mode, which makes it possible to search
   exactly for file names containing these characters. Previously, you
   would have had to use a less precise search, replacing them with a ?
   wild card or using another mode.
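
   As an illustration (the file name is made up), to search exactly for a
   file named report[2024].pdf in File name search mode with Recoll 1.42.2
   or newer, you could type:
report\[2024\].pdf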

Appendix B. Building and Installation

B.1. Installing a binary copy

   Recoll binary copies are always distributed as regular packages for
   your system. They can be obtained either through the system's normal
   software distribution framework (e.g. Debian/Ubuntu apt, FreeBSD ports,
   etc.), or from some type of "backports" repository providing versions
   newer than the standard ones, or found on the Recoll Web site in some
   cases. The most up-to-date information about Recoll packages can
   usually be found on the [317]Recoll Web site downloads page.

   The Windows version of Recoll comes as a self-contained setup file;
   there is nothing else to install.

   On Unix-like systems, the package management tools will automatically
   install hard dependencies for packages obtained from a proper package
   repository. You will have to deal with them by hand for downloaded
   packages (for example, when dpkg complains about missing dependencies).
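
   For example, on a Debian-like system, a manually downloaded package
   could be installed with something like the following (the package file
   name is illustrative), with the second command pulling in the missing
   dependencies:
sudo dpkg -i recoll_x.y.z-1_amd64.deb
sudo apt-get -f install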

   In all cases, you will have to check or install [318]supporting
   applications for the file types that you want to index beyond those
   that are natively processed by Recoll (text, HTML, email files, and a
   few others).

   You may also want to have a look at the [319]configuration section
   (but this may not be necessary for a quick test with default
   parameters). Most parameters can be more conveniently set from the GUI
   interface.

B.2. Supporting packages

Note

   The Windows installation of Recoll is self-contained. Windows users can
   skip this section.

   Recoll uses external applications to index some file types. You need to
   install them for the file types that you wish to have indexed (these
   are optional, run-time dependencies: none is needed for building or
   running Recoll, only for indexing their specific file types).

   After an indexing pass, the commands that were found missing can be
   displayed from the recoll File menu. The list is stored in the missing
   text file inside the configuration directory.
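
   For example, with a default configuration directory, the list can be
   displayed with:
cat ~/.recoll/missing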

   Experience has proven that I am unable to maintain an up-to-date
   application list in this manual. Please check
   [320]https://www.recoll.org/pages/features.html for a complete list
   along with links to the home pages or best source/patches pages, and
   misc tips. What follows is only a very short extract of the stable
   essentials.
     * PDF files need pdftotext which is part of Poppler (usually comes
       with the poppler-utils package). Avoid the original one from Xpdf.
     * MS Word documents need antiword. It is also useful to have wvWare
       installed as it may be used as a fallback for some files which
       antiword does not handle.
     * RTF files need unrtf, which, in its older versions, has much
       trouble with non-western character sets. Many Linux distributions
       carry outdated unrtf versions. Check
       [321]https://www.recoll.org/pages/features.html for details.
     * Pictures: Recoll uses the Exiftool Perl package to extract tag
       information. Most image file formats are supported.
     * Up to Recoll 1.24, many XML-based formats need the xsltproc
       command, which usually comes with libxslt. These are: abiword, fb2
       ebooks, kword, openoffice, opendocument, svg. Recoll 1.25 and later
       process them internally (using libxslt).

B.3. Building from source

B.3.1. Prerequisites

   The following prerequisites are described in broad terms, with Debian
   package names. The dependencies should be available as packages on most
   common Unix-like systems, and it should be quite uncommon that you
   would have to build one of them. Finding the right package name for
   non-Debian systems is left to the sagacity of the reader.

   Up to version 1.37, the Recoll build process used the GNU autotools.
   Versions 1.38 and later use meson/ninja instead.

   If you do not need the GUI, you can avoid all GUI dependencies by
   disabling its build: see the configure section further down:
   -Dqtgui=false.

   Check the [322]Recoll download page for up to date Recoll version
   information and links to source release files in tar format.

   The shopping list follows (a sample Debian install command is shown
   after the list):
     * If you start from git repository source code, you will need the
       git command, obviously (package: git).
     * On Unix-like systems, you will need the meson and ninja commands
       (package: meson, which will bring in ninja as a dependency). Not
       needed on MacOS systems at the moment.
     * The pkg-config command is needed for configuring the build
       (package: pkg-config).
     * The make command is needed for building the GUI, and unneeded if
       you disable it (package: make).
     * A C++ compiler with at least C++17 compatibility (g++ or clang).
       Recoll versions 1.33.4 and older only required C++11.
     * The bison command is not generally needed, but might be if you
       modify the query language yacc source or if some file modification
       times are not right (package: bison).
     * For building the documentation: the xsltproc command, and the
       Docbook XML and style sheet files. You can avoid this dependency by
       disabling documentation building with the -Duserdoc=false setup
       option.
     * Development files for [323]Xapian core (libxapian-dev).
     * Development files for libxml2 and libxslt (packages: libxslt1-dev,
       which will pull libxml2-dev).
     * Development files for zlib (zlib1g-dev).
     * Development files for libmagic (libmagic-dev).
     * Development files for libaspell (package: libaspell-dev). Can be
       avoided with the -Daspell=false setup option.
     * If you want to process CHM files, you will need libchm
       (libchm-dev), else you can set the -Dpython-chm=false option to the
       setup command.
     * If you want the daemon indexer process to monitor the session for
       quitting, you need the X11 development library (package:
       libx11-dev). Else use the -Dx11mon=false setup option.
     * If you want to build the GUI: qmake and development files for
       [324]Qt 5. Else give the -Dqtgui=false setup option. Packages:
       qtbase5-dev, qttools5-dev-tools, libqt5webkit5-dev. Replace
       libqt5webkit5-dev with libqt5webengine5 if you use
       -Dwebengine=true.
     * Development files for Python3 (packages: python3-all-dev,
       python3-setuptools). You can use the -Dpython-module=false setup
       option for disabling the build of the Python extension.
     * You may also need [325]libiconv. On Linux systems, the iconv
       interface is part of libc and you should not need to do anything
       special.
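
   As an illustration, on a Debian or Ubuntu system, a command like the
   following should install most of the build dependencies listed above
   (adjust to your needs, e.g. drop the Qt packages if you build with
   -Dqtgui=false):
sudo apt-get install meson pkg-config make g++ bison libxapian-dev \
  libxslt1-dev zlib1g-dev libmagic-dev libaspell-dev libx11-dev \
  qtbase5-dev qttools5-dev-tools libqt5webkit5-dev \
  python3-all-dev python3-setuptools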

B.3.2. Building

   Recoll has been built on Linux, FreeBSD, MacOS, and Solaris, most
   versions after 2005 should be ok, maybe some older ones too (Solaris 8
   used to be ok). Current Recoll versions (1.34 and later) need a C++17
   compiler and Qt5, so they will not build on old systems, but if really
   needed, you can probably find an older version which will work for you.
   If you build on another system, and need to modify things, [326]I would
   very much welcome patches.

meson setup options

   Of course the usual meson setup options, like -Dprefix=/usr apply.

   -Daspell=false will disable the code for phonetic matching of search
   terms.

   -Dfam=true or -Dinotify=true will enable the code for real time
   indexing. Inotify support is enabled by default on Linux systems. MacOS
   systems and Windows platforms now have real time indexing enabled by
   default and need no setup options.

   -Dqzeitgeist=true will enable sending Zeitgeist events about the
   visited search results, and needs the qzeitgeist package.

   -Dqtgui=false will disable the Qt graphical interface, which allows
   building the indexer and the command line search program in absence of
   a Qt environment.

   -Dwebkit=false will implement the result list with a Qt QTextBrowser
   instead of a WebKit widget, if you do not want to, or cannot, depend on
   the latter.

   -Dwebengine=true will enable the use of Qt Webengine (only meaningful
   if the Qt GUI is enabled), in place of Qt WebKit.

   -Dwebpreview=false: do not implement the GUI preview windows with
   WebKit or WebEngine; use QTextBrowser instead. Using the Web-based
   widgets will usually produce a better display, but they will sometimes
   fail to display anything because of JavaScript issues.

   -Dguidebug=true will build the recoll GUI program with debug symbols.
   This makes it very big (~50MB), which is why it is stripped by default.

   -Didxthreads=false will suppress multithreading inside the indexing
   process. You can also use the run-time configuration to restrict
   recollindex to using a single thread, but the compile-time option may
   disable a few more unused locks. This only applies to the use of
   multithreading for the core index processing (data input). The Recoll
   monitor mode always uses at least two threads of execution.

   -Dpython-module=false will avoid building the Python extension.

   -Dpython-chm=false will avoid building the Python libchm interface used
   to index CHM files.

   -Dpython-aspell=false will avoid building the Python libaspell
   interface. This is used to supplement queries with spelling guesses.

   -Dindexer=false will prevent building the indexer. Possibly useful if
   you just need the lib (e.g. for the Python extension).

   -Dsimdutf=false will prevent the use of the simdutf code normally used
   to speed up character code conversions.

   -Dcamelcase=true will enable splitting camelCase words. This is not
   enabled by default as it has the unfortunate side-effect of making some
   phrase searches quite confusing: i.e., "MySQL manual" would be matched by
   "MySQL manual" and "my sql manual" but not "mysql manual" (only inside
   phrase searches).

   -Dlibmagic=false: disable the use of libmagic (use a file-like command
   instead).

   -Dfile-command=somecommand Specify the version of the 'file' command to
   use (e.g.: -Dfile-command=/usr/local/bin/file). Can be useful to enable
   the GNU version on systems where the native one is inadequate.

   -Dx11mon=false Disable X11 connection monitoring inside recollindex.
   Together with -Dqtgui=false, this allows building recoll without Qt and
   X11.

   -Duserdoc=false will avoid building the user manual. This avoids having
   to install the Docbook XML/XSL files and the TeX toolchain used for
   translating the manual to PDF.

   -Drecollq=true Enable building the recollq command line query tool
   (recoll -t without need for Qt). This is done by default if
   -Dqtgui=false is used; this option forces it otherwise.

   -Dsystemd=false Disable the automatic installation of systemd unit
   files. Normally unit files are installed if the install path can be
   detected.

   -Dsystem-unit-dir=DIR Provide an install path for the systemd system
   unit template file.

   -Duser-unit-dir=DIR Provide an install path for the systemd user unit
   file.

Normal procedure, for source extracted from a tar distribution

   For versions 1.38 and later (else check the manual inside the older
   source):
cd recoll-xxx
meson setup [options] build
ninja -C build
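
   For example (the option values are illustrative), configuring a build
   without the Qt GUI, to be installed under /usr, could be done as
   follows:
meson setup -Dprefix=/usr -Dqtgui=false build
ninja -C build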

B.3.3. Installing

   Use sudo ninja install in your build tree. This will copy the commands
   to prefix/bin and the sample configuration files, scripts and other
   shared data to prefix/share/recoll.
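
   Equivalently, from the top of the source tree (with a build directory
   named build as above):
sudo ninja -C build install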

B.3.4. Python API package

   The Python interface can be found in the source tree, under the
   python/recoll directory.

   The normal Recoll build procedure (see above) installs the API package
   for Python3.

   For meson-based versions: the python/recoll/ directory still contains a
   setup.py. This is obsoleted by meson.build but might be useful in some
   cases.
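
   As a quick check that the extension is installed (a minimal sketch,
   assuming a default configuration with an existing index; the query
   string is illustrative):
from recoll import recoll

# Open the default index; pass confdir="/path/to/config" for another one.
db = recoll.connect()
q = db.query()
q.execute("some search terms")          # illustrative query
print("Estimated result count:", q.rowcount)
if q.rowcount > 0:
    doc = q.fetchone()
    print(doc.url, doc.title)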

References

   1. mailto:jfd@recoll.org
   2. https://www.gnu.org/licenses/fdl.html
   3. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.INTRODUCTION
   4. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.INTRODUCTION.TRYIT
   5. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.INTRODUCTION.SEARCH
   6. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.INTRODUCTION.RECOLL
   7. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.INDEXING
   8. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.INDEXING.INTRODUCTION
   9. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.INDEXING.INTRODUCTION.MODES
  10. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.INDEXING.INTRODUCTION.CONFIG
  11. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#id1339
  12. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#id1340
  13. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#id1341
  14. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.INDEXING.STORAGE
  15. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.INDEXING.STORAGE.FORMAT
  16. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.INDEXING.STORAGE.SECURITY
  17. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.INDEXING.STORAGE.BIG
  18. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.INDEXING.CONFIG
  19. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.INDEXING.CONFIG.GUI
  20. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.INDEXING.CONFIG.MULTIPLE
  21. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#id1342
  22. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#id1343
  23. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.INDEXING.CONFIG.SENS
  24. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.INDEXING.PERF
  25. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.INDEXING.PERF.THREADS
  26. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.INDEXING.PERF.MULTIDX
  27. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.INDEXING.QUIET
  28. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.INDEXING.OPERATION
  29. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.INDEXING.PERIODIC
  30. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.INDEXING.MONITOR
  31. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.INDEXING.FIELDS
  32. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.INDEXING.EXTRAMETA
  33. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.INDEXING.EXTATTR
  34. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.INDEXING.EXTTAGS
  35. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.INDEXING.NOTES
  36. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.INDEXING.PUNCTUATION
  37. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.INDEXING.PDF
  38. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.INDEXING.PDF.OUTLINE
  39. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.INDEXING.PDF.XMP
  40. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.INDEXING.PDF.ATTACH
  41. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.INDEXING.OCR
  42. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#id1344
  43. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#id1345
  44. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.INDEXING.STT
  45. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.INDEXING.REMOVABLE
  46. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.INDEXING.WEBQUEUE
  47. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH
  48. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.INTRODUCTION
  49. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.GUI
  50. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.GUI.SIMPLE
  51. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.GUI.SIDEFILTERS
  52. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.GUI.RESLIST
  53. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.GUI.RESTABLE
  54. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.GUI.PREVIEW
  55. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.GUI.COMPLEX
  56. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.GUI.COMPLEX.TERMS
  57. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.GUI.COMPLEX.FILTER
  58. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.GUI.COMPLEX.HISTORY
  59. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.GUI.HISTORY
  60. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.SAVING
  61. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.GUI.SORT
  62. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.GUI.TERMEXPLORER
  63. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.GUI.FRAGBUTS
  64. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.GUI.MULTIDB
  65. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.GUI.THUMBNAILS
  66. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.GUI.RUNSCRIPT
  67. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.GUI.SHORTCUTS
  68. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.GUI.TIPS
  69. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.GUI.TIPS.TERMS
  70. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.GUI.TIPS.PHRASES
  71. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.GUI.TIPS.MISC
  72. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.GUI.CUSTOM
  73. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.GUI.CUSTOM.APPLICATIONS
  74. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.GUI.CUSTOM.PREFERENCES
  75. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.GUI.CUSTOM.RESLIST
  76. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.GUI.CMDLINE
  77. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.KIO
  78. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.COMMANDLINE
  79. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.LANG
  80. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.LANG.SYNTAX
  81. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.LANG.SPECIALFIELDS
  82. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.LANG.RANGES
  83. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.LANG.MODIFIERS
  84. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.ANCHORWILD
  85. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.WILDCARDS
  86. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.ANCHOR
  87. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.SYNONYMS
  88. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.PTRANS
  89. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.CASEDIAC
  90. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.DESKTOP
  91. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.PROGRAM
  92. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.PROGRAM.FILTERS
  93. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.PROGRAM.FILTERS.SIMPLE
  94. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.PROGRAM.FILTERS.MULTIPLE
  95. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.PROGRAM.FILTERS.ASSOCIATION
  96. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.PROGRAM.FILTERS.HTML
  97. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.PROGRAM.FILTERS.PAGES
  98. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.PROGRAM.FIELDS
  99. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.PROGRAM.PYTHONAPI
 100. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.PROGRAM.PYTHONAPI.INTRO
 101. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.PROGRAM.PYTHONAPI.ELEMENTS
 102. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.PROGRAM.PYTHONAPI.LOG
 103. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.PROGRAM.PYTHONAPI.SEARCH
 104. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.PROGRAM.PYTHONAPI.RECOLL
 105. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.PROGRAM.PYTHONAPI.RCLEXTRACT
 106. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.PROGRAM.PYTHONAPI.SEARCH.EXAMPLE
 107. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.PROGRAM.PYTHONAPI.FSUDI
 108. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.PROGRAM.PYTHONAPI.UPDATE
 109. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.PROGRAM.PYTHONAPI.UPDATE.EXTINDEXER
 110. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#id1346
 111. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.PROGRAM.PYTHONAPI.UPDATE.CONFIGURATION
 112. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.PROGRAM.PYTHONAPI.UPDATE.SAMPLES
 113. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.PROGRAM.PYTHONAPI.UPDATE.ASEXTINDEX
 114. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.CONFIG
 115. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.CONFIG.OVERVIEW
 116. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.CONFIG.ENVIR
 117. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.CONFIG.RECOLLCONF
 118. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.CONFIG.RECOLLCONF.WHATDOCS
 119. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.CONFIG.RECOLLCONF.TERMS
 120. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.CONFIG.RECOLLCONF.STORE
 121. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.CONFIG.RECOLLCONF.PERFS
 122. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.CONFIG.RECOLLCONF.MISC
 123. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.CONFIG.RECOLLCONF.QUERY
 124. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.CONFIG.RECOLLCONF.PDF
 125. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.CONFIG.RECOLLCONF.ZIP
 126. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.CONFIG.RECOLLCONF.ORGHANDLERPARAMS
 127. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.CONFIG.RECOLLCONF.MOZMAILHANDLERPARAMS
 128. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.CONFIG.RECOLLCONF.OCR
 129. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.CONFIG.RECOLLCONF.SPEECHTOTEXTPARAMS
 130. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.CONFIG.FIELDS
 131. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.CONFIG.FIELDS.XATTR
 132. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.CONFIG.MIMEMAP
 133. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.CONFIG.MIMECONF
 134. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.CONFIG.MIMEVIEW
 135. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.CONFIG.PTRANS
 136. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.CONFIG.EXAMPLES
 137. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.CONFIG.EXAMPLES.ADDVIEW
 138. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.CONFIG.EXAMPLES.ADDINDEX
 139. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#id1355
 140. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.APPDX.SPECCHARS
 141. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#id1347
 142. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#id1351
 143. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#id1348
 144. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#id1349
 145. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#id1350
 146. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#id1354
 147. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#id1352
 148. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#id1353
 149. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.BUILDINSTALL
 150. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.INSTALL.BINARY
 151. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.INSTALL.EXTERNAL
 152. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.INSTALL.BUILDING
 153. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.INSTALL.BUILDING.PREREQS
 154. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.INSTALL.BUILDING.BUILDING
 155. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.INSTALL.BUILDING.INSTALL
 156. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.INSTALL.BUILDING.PYTHON
 157. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#id1356
 158. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.INSTALL.BINARY
 159. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.INDEXING.CONFIG
 160. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.INSTALL.EXTERNAL
 161. https://www.xapian.org/
 162. https://www.xapian.org/docs/intro_ir.html
 163. https://www.recoll.org/pages/recoll-chinese.html
 164. https://www.recoll.org/pages/recoll-korean.html
 165. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.INDEXING.CONFIG.SENS
 166. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.INDEXING.CONFIG
 167. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.INDEXING.PERIODIC.EXEC
 168. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH
 169. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.COMMANDLINE
 170. https://framagit.org/medoc92/recollwebui
 171. https://www.recoll.org/pages/download.html
 172. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.PROGRAM.PYTHONAPI
 173. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.KIO
 174. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.INDEXING.REMOVABLE
 175. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.INDEXING.PERIODIC
 176. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.INDEXING.PERIODIC.AUTOMAT
 177. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.INDEXING.MONITOR
 178. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.INDEXING.MONITOR
 179. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.INDEXING.CONFIG
 180. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.INDEXING.CONFIG.MULTIPLE
 181. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.INDEXING.CONFIG
 182. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.INSTALL.EXTERNAL
 183. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.CONFIG.RECOLLCONF.WHATDOCS
 184. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.CONFIG.RECOLLCONF.INDEXEDMIMETYPES
 185. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.CONFIG.RECOLLCONF.SKIPPEDNAMES
 186. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.CONFIG.RECOLLCONF.EXCLUDEDMIMETYPES
 187. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.CONFIG.RECOLLCONF
 188. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.CONFIG.MIMEMAP
 189. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.INDEXING.STORAGE
 190. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.CONFIG.RECOLLCONF.DBDIR
 191. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.INDEXING.CONFIG.MULTIPLE
 192. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.CONFIG.RECOLLCONF.STORE
 193. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.INDEXING.PERF.MULTIDX
 194. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.CONFIG
 195. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.INDEXING.CONFIG.GUI
 196. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.CONFIG
 197. https://www.recoll.org/manpages/recoll.conf.5.html
 198. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.CONFIG.RECOLLCONF.TOPDIRS
 199. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.INSTALL.EXTERNAL
 200. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.INDEXING.CONFIG.SENS
 201. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.INDEXING.WEBQUEUE
 202. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.CONFIG
 203. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.INDEXING.CONFIG.GUI
 204. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.CONFIG.RECOLLCONF
 205. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.CONFIG.RECOLLCONF.TERMS
 206. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.INDEXING.CONFIG.GUI
 207. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.CONFIG.RECOLLCONF
 208. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.GUI.MULTIDB
 209. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.CASEDIAC
 210. https://www.recoll.org/pages/idxthreads/threadingRecoll.html
 211. https://www.recoll.org/pages/idxthreads/threadingRecoll.html#_the_xapian_bottleneck_and_how_it_was_resolved_thanks_to_xapian
 212. https://www.recoll.org/faqsandhowtos/cgroups_instructions.html
 213. https://www.recoll.org/manpages/recollindex.1.html
 214. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.CONFIG.RECOLLCONF.MONITORDIRS
 215. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.CONFIG.RECOLLCONF.MISC
 216. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.CONFIG.FIELDS
 217. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.CONFIG.FIELDS
 218. https://www.freedesktop.org/wiki/CommonExtendedAttributes
 219. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.CONFIG.FIELDS.XATTR
 220. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.CONFIG.RECOLLCONF.METADATACMDS
 221. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.PROGRAM.FIELDS
 222. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.INDEXING.OCR
 223. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.CONFIG.RECOLLCONF.PDFEXTRAMETA
 224. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.CONFIG.RECOLLCONF.PDFEXTRAMETAFIX
 225. https://www.recoll.org/pages/recoll_XMP/index.html
 226. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.CONFIG.RECOLLCONF.PDFATTACH
 227. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.CONFIG.RECOLLCONF.OCR
 228. https://ocrmypdf.readthedocs.io/en/latest/index.html
 229. https://framagit.org/medoc92/recoll/-/blob/master/src/filters/rclimg.py?ref_type=heads
 230. https://www.recoll.org/faqsandhowtos/IndexAudioWhisper.html
 231. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.CONFIG.RECOLLCONF.ORGIDXCONFDIR
 232. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.CONFIG.RECOLLCONF.CURIDXCONFDIR
 233. https://addons.mozilla.org/en-US/firefox/addon/recoll-we/
 234. https://www.recoll.org/faqsandhowtos/IndexWebHistory
 235. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.LANG
 236. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.SAVING
 237. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.LANG
 238. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.WILDCARDS
 239. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.LANG
 240. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.CASEDIAC
 241. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.LANG
 242. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.GUI.CUSTOM.RESLIST
 243. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.GUI.CUSTOM.APPLICATIONS
 244. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.CONFIG.MIMECONF
 245. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.CONFIG.MIMEVIEW
 246. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.GUI.RUNSCRIPT
 247. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.GUI.COMPLEX.HISTORY
 248. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.WILDCARDS
 249. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.INDEXING.CONFIG.MULTIPLE
 250. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.PTRANS
 251. https://www.recoll.org/faqsandhowtos/ResultsThumbnails.html
 252. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.GUI.CUSTOM.RESLIST.PARA
 253. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.WILDCARDS
 254. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.LANG.MODIFIERS
 255. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.LANG
 256. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.CONFIG.MIMEVIEW
 257. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.CONFIG.MIMEVIEW
 258. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.GUI.CUSTOM.RESLIST
 259. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.GUI.CUSTOM.RESLIST
 260. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.CONFIG.RECOLLCONF
 261. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.GUI.RESLIST.MENU.SNIPPETS
 262. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.GUI.RESLIST.MENU.SNIPPETS
 263. https://www.recoll.org/pages/custom.html
 264. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.CONFIG.MIMECONF
 265. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.PROGRAM.FIELDS
 266. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.GUI.RUNSCRIPT
 267. https://www.recoll.org/pages/custom.html
 268. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.GUI.CUSTOM.ABSSEP
 269. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.PROGRAM.PYTHONAPI
 270. https://www.recoll.org/manpages/recollq.1.html
 271. https://www.xesam.org/main/XesamUserSearchLanguage95
 272. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.LANG.MODIFIERS
 273. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.WILDCARDS
 274. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.LANG.MODIFIERS
 275. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.CONFIG.FIELDS
 276. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.WILDCARDS.PATH
 277. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.CONFIG.FIELDS
 278. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.GUI.COMPLEX.PHRASEANDPROX
 279. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.CONFIG.RECOLLCONF.STEMEXPANDPHRASES
 280. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.GUI.TERMEXPLORER
 281. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.CONFIG.RECOLLCONF.IDXSYNONYMS
 282. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.INDEXING.REMOVABLE.SELF
 283. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.CONFIG.PTRANS
 284. https://www.recoll.org/pages/download.html#gssp
 285. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.KIO
 286. https://www.recoll.org/faqsandhowtos/HotRecoll
 287. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.PROGRAM.FIELDS
 288. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.PROGRAM.FILTERS.HTML
 289. https://framagit.org/medoc92/recoll
 290. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.CONFIG.MIMEMAP
 291. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.CONFIG.MIMECONF
 292. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.PROGRAM.FIELDS
 293. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.PROGRAM.FIELDS
 294. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.CONFIG.FIELDS
 295. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.GUI.CUSTOM.RESLIST
 296. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.CONFIG.FIELDS
 297. https://www.recoll.org/faqsandhowtos/HandleCustomField
 298. https://www.recoll.org/pages/download.html#gssp
 299. https://framagit.org/medoc92/recollwebui
 300. https://www.lesbonscomptes.com/upmpdcli/upmpdcli-manual.html#UPRCL
 301. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.INSTALL.BUILDING
 302. https://framagit.org/medoc92/recoll-gssp/-/blob/master/gssp-recoll.py
 303. https://framagit.org/medoc92/recollwebui/-/blob/master/webui.py
 304. https://framagit.org/medoc92/upmpdcli/-/blob/master/src/mediaserver/cdplugins/uprcl/uprclfolders.py
 305. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.CONFIG.FIELDS
 306. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.PROGRAM.PYTHONAPI.UPDATE.SAMPLES
 307. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.PROGRAM.PYTHONAPI.UPDATE.SAMPLES
 308. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.PROGRAM.PYTHONAPI.ELEMENTS.UDI
 309. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.PROGRAM.PYTHONAPI.RECOLL.CLASSES.DOC
 310. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.PROGRAM.PYTHONAPI.ELEMENTS.PARENTUDI
 311. https://framagit.org/medoc92/recoll/-/blob/master/src/python/samples/rclmbox.py
 312. https://framagit.org/medoc92/recoll/-/blob/master/src/filters/rcljoplin.py
 313. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.GUI.MULTIDB
 314. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.LANG
 315. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.SEARCH.PTRANS
 316. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.PROGRAM.FILTERS
 317. https://www.recoll.org/pages/download.html
 318. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.INSTALL.EXTERNAL
 319. file:///home/dockes/projets/fulltext/recoll/src/doc/user/usermanual.html#RCL.CONFIG
 320. https://www.recoll.org/pages/features.html#doctypes
 321. https://www.recoll.org/pages/features.html#doctypes
 322. https://www.recoll.org/pages/download.html
 323. https://www.xapian.org/
 324. https://qt-project.org/downloads
 325. https://www.gnu.org/software/libiconv/
 326. mailto:jfd@recoll.org