Browse Source

Restoring authorship annotation for <ilezhankin@yandex-team.ru>. Commit 1 of 2.

ilezhankin 3 years ago
parent
commit
1d125034f0

+ 16 - 16
contrib/libs/pdqsort/license.txt

@@ -1,16 +1,16 @@
-Copyright (c) 2015 Orson Peters <orsonpeters@gmail.com>
-
-This software is provided 'as-is', without any express or implied warranty. In no event will the
-authors be held liable for any damages arising from the use of this software.
-
-Permission is granted to anyone to use this software for any purpose, including commercial
-applications, and to alter it and redistribute it freely, subject to the following restrictions:
-
-1. The origin of this software must not be misrepresented; you must not claim that you wrote the
-   original software. If you use this software in a product, an acknowledgment in the product
-   documentation would be appreciated but is not required.
-
-2. Altered source versions must be plainly marked as such, and must not be misrepresented as
-   being the original software.
-
-3. This notice may not be removed or altered from any source distribution.
+Copyright (c) 2015 Orson Peters <orsonpeters@gmail.com> 
+ 
+This software is provided 'as-is', without any express or implied warranty. In no event will the 
+authors be held liable for any damages arising from the use of this software. 
+ 
+Permission is granted to anyone to use this software for any purpose, including commercial 
+applications, and to alter it and redistribute it freely, subject to the following restrictions: 
+ 
+1. The origin of this software must not be misrepresented; you must not claim that you wrote the 
+   original software. If you use this software in a product, an acknowledgment in the product 
+   documentation would be appreciated but is not required. 
+ 
+2. Altered source versions must be plainly marked as such, and must not be misrepresented as 
+   being the original software. 
+ 
+3. This notice may not be removed or altered from any source distribution. 

+ 544 - 544
contrib/libs/pdqsort/pdqsort.h

@@ -1,544 +1,544 @@
-/*
-    pdqsort.h - Pattern-defeating quicksort.
-
-    Copyright (c) 2015 Orson Peters
-
-    This software is provided 'as-is', without any express or implied warranty. In no event will the
-    authors be held liable for any damages arising from the use of this software.
-
-    Permission is granted to anyone to use this software for any purpose, including commercial
-    applications, and to alter it and redistribute it freely, subject to the following restrictions:
-
-    1. The origin of this software must not be misrepresented; you must not claim that you wrote the
-       original software. If you use this software in a product, an acknowledgment in the product
-       documentation would be appreciated but is not required.
-
-    2. Altered source versions must be plainly marked as such, and must not be misrepresented as
-       being the original software.
-
-    3. This notice may not be removed or altered from any source distribution.
-*/
-
-
-#ifndef PDQSORT_H
-#define PDQSORT_H
-
-#include <algorithm>
-#include <cstddef>
-#include <functional>
-#include <utility>
-#include <iterator>
-
-#if __cplusplus >= 201103L
-    #include <cstdint>
-    #include <type_traits>
-    #define PDQSORT_PREFER_MOVE(x) std::move(x)
-#else
-    #define PDQSORT_PREFER_MOVE(x) (x)
-#endif
-
-
-namespace pdqsort_detail {
-    enum {
-        // Partitions below this size are sorted using insertion sort.
-        insertion_sort_threshold = 24,
-
-        // Partitions above this size use Tukey's ninther to select the pivot.
-        ninther_threshold = 128,
-
-        // When we detect an already sorted partition, attempt an insertion sort that allows this
-        // amount of element moves before giving up.
-        partial_insertion_sort_limit = 8,
-
-        // Must be multiple of 8 due to loop unrolling, and < 256 to fit in unsigned char.
-        block_size = 64,
-
-        // Cacheline size, assumes power of two.
-        cacheline_size = 64
-
-    };
-
-#if __cplusplus >= 201103L
-    template<class T> struct is_default_compare : std::false_type { };
-    template<class T> struct is_default_compare<std::less<T>> : std::true_type { };
-    template<class T> struct is_default_compare<std::greater<T>> : std::true_type { };
-#endif
-
-    // Returns floor(log2(n)), assumes n > 0.
-    template<class T>
-    inline int log2(T n) {
-        int log = 0;
-        while (n >>= 1) ++log;
-        return log;
-    }
-
-    // Sorts [begin, end) using insertion sort with the given comparison function.
-    template<class Iter, class Compare>
-    inline void insertion_sort(Iter begin, Iter end, Compare comp) {
-        typedef typename std::iterator_traits<Iter>::value_type T;
-        if (begin == end) return;
-
-        for (Iter cur = begin + 1; cur != end; ++cur) {
-            Iter sift = cur;
-            Iter sift_1 = cur - 1;
-
-            // Compare first so we can avoid 2 moves for an element already positioned correctly.
-            if (comp(*sift, *sift_1)) {
-                T tmp = PDQSORT_PREFER_MOVE(*sift);
-
-                do { *sift-- = PDQSORT_PREFER_MOVE(*sift_1); }
-                while (sift != begin && comp(tmp, *--sift_1));
-
-                *sift = PDQSORT_PREFER_MOVE(tmp);
-            }
-        }
-    }
-
-    // Sorts [begin, end) using insertion sort with the given comparison function. Assumes
-    // *(begin - 1) is an element smaller than or equal to any element in [begin, end).
-    template<class Iter, class Compare>
-    inline void unguarded_insertion_sort(Iter begin, Iter end, Compare comp) {
-        typedef typename std::iterator_traits<Iter>::value_type T;
-        if (begin == end) return;
-
-        for (Iter cur = begin + 1; cur != end; ++cur) {
-            Iter sift = cur;
-            Iter sift_1 = cur - 1;
-
-            // Compare first so we can avoid 2 moves for an element already positioned correctly.
-            if (comp(*sift, *sift_1)) {
-                T tmp = PDQSORT_PREFER_MOVE(*sift);
-
-                do { *sift-- = PDQSORT_PREFER_MOVE(*sift_1); }
-                while (comp(tmp, *--sift_1));
-
-                *sift = PDQSORT_PREFER_MOVE(tmp);
-            }
-        }
-    }
-
-    // Attempts to use insertion sort on [begin, end). Will return false if more than
-    // partial_insertion_sort_limit elements were moved, and abort sorting. Otherwise it will
-    // successfully sort and return true.
-    template<class Iter, class Compare>
-    inline bool partial_insertion_sort(Iter begin, Iter end, Compare comp) {
-        typedef typename std::iterator_traits<Iter>::value_type T;
-        if (begin == end) return true;
-        
-        std::size_t limit = 0;
-        for (Iter cur = begin + 1; cur != end; ++cur) {
-            Iter sift = cur;
-            Iter sift_1 = cur - 1;
-
-            // Compare first so we can avoid 2 moves for an element already positioned correctly.
-            if (comp(*sift, *sift_1)) {
-                T tmp = PDQSORT_PREFER_MOVE(*sift);
-
-                do { *sift-- = PDQSORT_PREFER_MOVE(*sift_1); }
-                while (sift != begin && comp(tmp, *--sift_1));
-
-                *sift = PDQSORT_PREFER_MOVE(tmp);
-                limit += cur - sift;
-            }
-            
-            if (limit > partial_insertion_sort_limit) return false;
-        }
-
-        return true;
-    }
-
-    template<class Iter, class Compare>
-    inline void sort2(Iter a, Iter b, Compare comp) {
-        if (comp(*b, *a)) std::iter_swap(a, b);
-    }
-
-    // Sorts the elements *a, *b and *c using comparison function comp.
-    template<class Iter, class Compare>
-    inline void sort3(Iter a, Iter b, Iter c, Compare comp) {
-        sort2(a, b, comp);
-        sort2(b, c, comp);
-        sort2(a, b, comp);
-    }
-
-    template<class T>
-    inline T* align_cacheline(T* p) {
-#if defined(UINTPTR_MAX) && __cplusplus >= 201103L
-        std::uintptr_t ip = reinterpret_cast<std::uintptr_t>(p);
-#else
-        std::size_t ip = reinterpret_cast<std::size_t>(p);
-#endif
-        ip = (ip + cacheline_size - 1) & -cacheline_size;
-        return reinterpret_cast<T*>(ip);
-    }
-
-    template<class Iter>
-    inline void swap_offsets(Iter first, Iter last,
-                             unsigned char* offsets_l, unsigned char* offsets_r,
-                             int num, bool use_swaps) {
-        typedef typename std::iterator_traits<Iter>::value_type T;
-        if (use_swaps) {
-            // This case is needed for the descending distribution, where we need
-            // to have proper swapping for pdqsort to remain O(n).
-            for (int i = 0; i < num; ++i) {
-                std::iter_swap(first + offsets_l[i], last - offsets_r[i]);
-            }
-        } else if (num > 0) {
-            Iter l = first + offsets_l[0]; Iter r = last - offsets_r[0];
-            T tmp(PDQSORT_PREFER_MOVE(*l)); *l = PDQSORT_PREFER_MOVE(*r);
-            for (int i = 1; i < num; ++i) {
-                l = first + offsets_l[i]; *r = PDQSORT_PREFER_MOVE(*l);
-                r = last - offsets_r[i]; *l = PDQSORT_PREFER_MOVE(*r);
-            }
-            *r = PDQSORT_PREFER_MOVE(tmp);
-        }
-    }
-
-    // Partitions [begin, end) around pivot *begin using comparison function comp. Elements equal
-    // to the pivot are put in the right-hand partition. Returns the position of the pivot after
-    // partitioning and whether the passed sequence already was correctly partitioned. Assumes the
-    // pivot is a median of at least 3 elements and that [begin, end) is at least
-    // insertion_sort_threshold long. Uses branchless partitioning.
-    template<class Iter, class Compare>
-    inline std::pair<Iter, bool> partition_right_branchless(Iter begin, Iter end, Compare comp) {
-        typedef typename std::iterator_traits<Iter>::value_type T;
-
-        // Move pivot into local for speed.
-        T pivot(PDQSORT_PREFER_MOVE(*begin));
-        Iter first = begin;
-        Iter last = end;
-
-        // Find the first element greater than or equal than the pivot (the median of 3 guarantees
-        // this exists).
-        while (comp(*++first, pivot));
-
-        // Find the first element strictly smaller than the pivot. We have to guard this search if
-        // there was no element before *first.
-        if (first - 1 == begin) while (first < last && !comp(*--last, pivot));
-        else                    while (                !comp(*--last, pivot));
-
-        // If the first pair of elements that should be swapped to partition are the same element,
-        // the passed in sequence already was correctly partitioned.
-        bool already_partitioned = first >= last;
-        if (!already_partitioned) {
-            std::iter_swap(first, last);
-            ++first;
-        }
-
-        // The following branchless partitioning is derived from "BlockQuicksort: How Branch
-        // Mispredictions don’t affect Quicksort" by Stefan Edelkamp and Armin Weiss.
-        unsigned char offsets_l_storage[block_size + cacheline_size];
-        unsigned char offsets_r_storage[block_size + cacheline_size];
-        unsigned char* offsets_l = align_cacheline(offsets_l_storage);
-        unsigned char* offsets_r = align_cacheline(offsets_r_storage);
-        int num_l, num_r, start_l, start_r;
-        num_l = num_r = start_l = start_r = 0;
-        
-        while (last - first > 2 * block_size) {
-            // Fill up offset blocks with elements that are on the wrong side.
-            if (num_l == 0) {
-                start_l = 0;
-                Iter it = first;
-                for (unsigned char i = 0; i < block_size;) {
-                    offsets_l[num_l] = i++; num_l += !comp(*it, pivot); ++it;
-                    offsets_l[num_l] = i++; num_l += !comp(*it, pivot); ++it;
-                    offsets_l[num_l] = i++; num_l += !comp(*it, pivot); ++it;
-                    offsets_l[num_l] = i++; num_l += !comp(*it, pivot); ++it;
-                    offsets_l[num_l] = i++; num_l += !comp(*it, pivot); ++it;
-                    offsets_l[num_l] = i++; num_l += !comp(*it, pivot); ++it;
-                    offsets_l[num_l] = i++; num_l += !comp(*it, pivot); ++it;
-                    offsets_l[num_l] = i++; num_l += !comp(*it, pivot); ++it;
-                }
-            }
-            if (num_r == 0) {
-                start_r = 0;
-                Iter it = last;
-                for (unsigned char i = 0; i < block_size;) {
-                    offsets_r[num_r] = ++i; num_r += comp(*--it, pivot);
-                    offsets_r[num_r] = ++i; num_r += comp(*--it, pivot);
-                    offsets_r[num_r] = ++i; num_r += comp(*--it, pivot);
-                    offsets_r[num_r] = ++i; num_r += comp(*--it, pivot);
-                    offsets_r[num_r] = ++i; num_r += comp(*--it, pivot);
-                    offsets_r[num_r] = ++i; num_r += comp(*--it, pivot);
-                    offsets_r[num_r] = ++i; num_r += comp(*--it, pivot);
-                    offsets_r[num_r] = ++i; num_r += comp(*--it, pivot);
-                }
-            }
-
-            // Swap elements and update block sizes and first/last boundaries.
-            int num = std::min(num_l, num_r);
-            swap_offsets(first, last, offsets_l + start_l, offsets_r + start_r,
-                         num, num_l == num_r);
-            num_l -= num; num_r -= num;
-            start_l += num; start_r += num;
-            if (num_l == 0) first += block_size;
-            if (num_r == 0) last -= block_size;
-        }
-
-        int l_size = 0, r_size = 0;
-        int unknown_left = (int)(last - first) - ((num_r || num_l) ? block_size : 0);
-        if (num_r) {
-            // Handle leftover block by assigning the unknown elements to the other block.
-            l_size = unknown_left;
-            r_size = block_size;
-        } else if (num_l) {
-            l_size = block_size;
-            r_size = unknown_left;
-        } else {
-            // No leftover block, split the unknown elements in two blocks.
-            l_size = unknown_left/2;
-            r_size = unknown_left - l_size;
-        }
-
-        // Fill offset buffers if needed.
-        if (unknown_left && !num_l) {
-            start_l = 0;
-            Iter it = first;
-            for (unsigned char i = 0; i < l_size;) {
-                offsets_l[num_l] = i++; num_l += !comp(*it, pivot); ++it;
-            }
-        }
-        if (unknown_left && !num_r) {
-            start_r = 0;
-            Iter it = last;
-            for (unsigned char i = 0; i < r_size;) {
-                offsets_r[num_r] = ++i; num_r += comp(*--it, pivot);
-            }
-        }
-
-        int num = std::min(num_l, num_r);
-        swap_offsets(first, last, offsets_l + start_l, offsets_r + start_r, num, num_l == num_r);
-        num_l -= num; num_r -= num;
-        start_l += num; start_r += num;
-        if (num_l == 0) first += l_size;
-        if (num_r == 0) last -= r_size;
-        
-        // We have now fully identified [first, last)'s proper position. Swap the last elements.
-        if (num_l) {
-            offsets_l += start_l;
-            while (num_l--) std::iter_swap(first + offsets_l[num_l], --last);
-            first = last;
-        }
-        if (num_r) {
-            offsets_r += start_r;
-            while (num_r--) std::iter_swap(last - offsets_r[num_r], first), ++first;
-            last = first;
-        }
-
-        // Put the pivot in the right place.
-        Iter pivot_pos = first - 1;
-        *begin = PDQSORT_PREFER_MOVE(*pivot_pos);
-        *pivot_pos = PDQSORT_PREFER_MOVE(pivot);
-
-        return std::make_pair(pivot_pos, already_partitioned);
-    }
-
-    // Partitions [begin, end) around pivot *begin using comparison function comp. Elements equal
-    // to the pivot are put in the right-hand partition. Returns the position of the pivot after
-    // partitioning and whether the passed sequence already was correctly partitioned. Assumes the
-    // pivot is a median of at least 3 elements and that [begin, end) is at least
-    // insertion_sort_threshold long.
-    template<class Iter, class Compare>
-    inline std::pair<Iter, bool> partition_right(Iter begin, Iter end, Compare comp) {
-        typedef typename std::iterator_traits<Iter>::value_type T;
-        
-        // Move pivot into local for speed.
-        T pivot(PDQSORT_PREFER_MOVE(*begin));
-
-        Iter first = begin;
-        Iter last = end;
-
-        // Find the first element greater than or equal than the pivot (the median of 3 guarantees
-        // this exists).
-        while (comp(*++first, pivot));
-
-        // Find the first element strictly smaller than the pivot. We have to guard this search if
-        // there was no element before *first.
-        if (first - 1 == begin) while (first < last && !comp(*--last, pivot));
-        else                    while (                !comp(*--last, pivot));
-
-        // If the first pair of elements that should be swapped to partition are the same element,
-        // the passed in sequence already was correctly partitioned.
-        bool already_partitioned = first >= last;
-        
-        // Keep swapping pairs of elements that are on the wrong side of the pivot. Previously
-        // swapped pairs guard the searches, which is why the first iteration is special-cased
-        // above.
-        while (first < last) {
-            std::iter_swap(first, last);
-            while (comp(*++first, pivot));
-            while (!comp(*--last, pivot));
-        }
-
-        // Put the pivot in the right place.
-        Iter pivot_pos = first - 1;
-        *begin = PDQSORT_PREFER_MOVE(*pivot_pos);
-        *pivot_pos = PDQSORT_PREFER_MOVE(pivot);
-
-        return std::make_pair(pivot_pos, already_partitioned);
-    }
-
-    // Similar function to the one above, except elements equal to the pivot are put to the left of
-    // the pivot and it doesn't check or return if the passed sequence already was partitioned.
-    // Since this is rarely used (the many equal case), and in that case pdqsort already has O(n)
-    // performance, no block quicksort is applied here for simplicity.
-    template<class Iter, class Compare>
-    inline Iter partition_left(Iter begin, Iter end, Compare comp) {
-        typedef typename std::iterator_traits<Iter>::value_type T;
-
-        T pivot(PDQSORT_PREFER_MOVE(*begin));
-        Iter first = begin;
-        Iter last = end;
-        
-        while (comp(pivot, *--last));
-
-        if (last + 1 == end) while (first < last && !comp(pivot, *++first));
-        else                 while (                !comp(pivot, *++first));
-
-        while (first < last) {
-            std::iter_swap(first, last);
-            while (comp(pivot, *--last));
-            while (!comp(pivot, *++first));
-        }
-
-        Iter pivot_pos = last;
-        *begin = PDQSORT_PREFER_MOVE(*pivot_pos);
-        *pivot_pos = PDQSORT_PREFER_MOVE(pivot);
-
-        return pivot_pos;
-    }
-
-
-    template<class Iter, class Compare, bool Branchless>
-    inline void pdqsort_loop(Iter begin, Iter end, Compare comp, int bad_allowed, bool leftmost = true) {
-        typedef typename std::iterator_traits<Iter>::difference_type diff_t;
-
-        // Use a while loop for tail recursion elimination.
-        while (true) {
-            diff_t size = end - begin;
-
-            // Insertion sort is faster for small arrays.
-            if (size < insertion_sort_threshold) {
-                if (leftmost) insertion_sort(begin, end, comp);
-                else unguarded_insertion_sort(begin, end, comp);
-                return;
-            }
-
-            // Choose pivot as median of 3 or pseudomedian of 9.
-            diff_t s2 = size / 2;
-            if (size > ninther_threshold) {
-                sort3(begin, begin + s2, end - 1, comp);
-                sort3(begin + 1, begin + (s2 - 1), end - 2, comp);
-                sort3(begin + 2, begin + (s2 + 1), end - 3, comp);
-                sort3(begin + (s2 - 1), begin + s2, begin + (s2 + 1), comp);
-                std::iter_swap(begin, begin + s2);
-            } else sort3(begin + s2, begin, end - 1, comp);
-
-            // If *(begin - 1) is the end of the right partition of a previous partition operation
-            // there is no element in [begin, end) that is smaller than *(begin - 1). Then if our
-            // pivot compares equal to *(begin - 1) we change strategy, putting equal elements in
-            // the left partition, greater elements in the right partition. We do not have to
-            // recurse on the left partition, since it's sorted (all equal).
-            if (!leftmost && !comp(*(begin - 1), *begin)) {
-                begin = partition_left(begin, end, comp) + 1;
-                continue;
-            }
-
-            // Partition and get results.
-            std::pair<Iter, bool> part_result =
-                Branchless ? partition_right_branchless(begin, end, comp)
-                           : partition_right(begin, end, comp);
-            Iter pivot_pos = part_result.first;
-            bool already_partitioned = part_result.second;
-
-            // Check for a highly unbalanced partition.
-            diff_t l_size = pivot_pos - begin;
-            diff_t r_size = end - (pivot_pos + 1);
-            bool highly_unbalanced = l_size < size / 8 || r_size < size / 8;
-
-            // If we got a highly unbalanced partition we shuffle elements to break many patterns.
-            if (highly_unbalanced) {
-                // If we had too many bad partitions, switch to heapsort to guarantee O(n log n).
-                if (--bad_allowed == 0) {
-                    std::make_heap(begin, end, comp);
-                    std::sort_heap(begin, end, comp);
-                    return;
-                }
-
-                if (l_size >= insertion_sort_threshold) {
-                    std::iter_swap(begin,             begin + l_size / 4);
-                    std::iter_swap(pivot_pos - 1, pivot_pos - l_size / 4);
-
-                    if (l_size > ninther_threshold) {
-                        std::iter_swap(begin + 1,         begin + (l_size / 4 + 1));
-                        std::iter_swap(begin + 2,         begin + (l_size / 4 + 2));
-                        std::iter_swap(pivot_pos - 2, pivot_pos - (l_size / 4 + 1));
-                        std::iter_swap(pivot_pos - 3, pivot_pos - (l_size / 4 + 2));
-                    }
-                }
-                
-                if (r_size >= insertion_sort_threshold) {
-                    std::iter_swap(pivot_pos + 1, pivot_pos + (1 + r_size / 4));
-                    std::iter_swap(end - 1,                   end - r_size / 4);
-                    
-                    if (r_size > ninther_threshold) {
-                        std::iter_swap(pivot_pos + 2, pivot_pos + (2 + r_size / 4));
-                        std::iter_swap(pivot_pos + 3, pivot_pos + (3 + r_size / 4));
-                        std::iter_swap(end - 2,             end - (1 + r_size / 4));
-                        std::iter_swap(end - 3,             end - (2 + r_size / 4));
-                    }
-                }
-            } else {
-                // If we were decently balanced and we tried to sort an already partitioned
-                // sequence try to use insertion sort.
-                if (already_partitioned && partial_insertion_sort(begin, pivot_pos, comp)
-                                        && partial_insertion_sort(pivot_pos + 1, end, comp)) return;
-            }
-                
-            // Sort the left partition first using recursion and do tail recursion elimination for
-            // the right-hand partition.
-            pdqsort_loop<Iter, Compare, Branchless>(begin, pivot_pos, comp, bad_allowed, leftmost);
-            begin = pivot_pos + 1;
-            leftmost = false;
-        }
-    }
-}
-
-
-template<class Iter, class Compare>
-inline void pdqsort(Iter begin, Iter end, Compare comp) {
-    if (begin == end) return;
-
-#if __cplusplus >= 201103L
-    pdqsort_detail::pdqsort_loop<Iter, Compare,
-        pdqsort_detail::is_default_compare<typename std::decay<Compare>::type>::value &&
-        std::is_arithmetic<typename std::iterator_traits<Iter>::value_type>::value>(
-        begin, end, comp, pdqsort_detail::log2(end - begin));
-#else
-    pdqsort_detail::pdqsort_loop<Iter, Compare, false>(
-        begin, end, comp, pdqsort_detail::log2(end - begin));
-#endif
-}
-
-template<class Iter>
-inline void pdqsort(Iter begin, Iter end) {
-    typedef typename std::iterator_traits<Iter>::value_type T;
-    pdqsort(begin, end, std::less<T>());
-}
-
-template<class Iter, class Compare>
-inline void pdqsort_branchless(Iter begin, Iter end, Compare comp) {
-    if (begin == end) return;
-    pdqsort_detail::pdqsort_loop<Iter, Compare, true>(
-        begin, end, comp, pdqsort_detail::log2(end - begin));
-}
-
-template<class Iter>
-inline void pdqsort_branchless(Iter begin, Iter end) {
-    typedef typename std::iterator_traits<Iter>::value_type T;
-    pdqsort_branchless(begin, end, std::less<T>());
-}
-
-
-#undef PDQSORT_PREFER_MOVE
-
-#endif
+/* 
+    pdqsort.h - Pattern-defeating quicksort. 
+ 
+    Copyright (c) 2015 Orson Peters 
+ 
+    This software is provided 'as-is', without any express or implied warranty. In no event will the 
+    authors be held liable for any damages arising from the use of this software. 
+ 
+    Permission is granted to anyone to use this software for any purpose, including commercial 
+    applications, and to alter it and redistribute it freely, subject to the following restrictions: 
+ 
+    1. The origin of this software must not be misrepresented; you must not claim that you wrote the 
+       original software. If you use this software in a product, an acknowledgment in the product 
+       documentation would be appreciated but is not required. 
+ 
+    2. Altered source versions must be plainly marked as such, and must not be misrepresented as 
+       being the original software. 
+ 
+    3. This notice may not be removed or altered from any source distribution. 
+*/ 
+ 
+ 
+#ifndef PDQSORT_H 
+#define PDQSORT_H 
+ 
+#include <algorithm> 
+#include <cstddef> 
+#include <functional> 
+#include <utility> 
+#include <iterator> 
+ 
+#if __cplusplus >= 201103L 
+    #include <cstdint> 
+    #include <type_traits> 
+    #define PDQSORT_PREFER_MOVE(x) std::move(x) 
+#else 
+    #define PDQSORT_PREFER_MOVE(x) (x) 
+#endif 
+ 
+ 
+namespace pdqsort_detail { 
+    enum { 
+        // Partitions below this size are sorted using insertion sort. 
+        insertion_sort_threshold = 24, 
+ 
+        // Partitions above this size use Tukey's ninther to select the pivot. 
+        ninther_threshold = 128, 
+ 
+        // When we detect an already sorted partition, attempt an insertion sort that allows this 
+        // amount of element moves before giving up. 
+        partial_insertion_sort_limit = 8, 
+ 
+        // Must be multiple of 8 due to loop unrolling, and < 256 to fit in unsigned char. 
+        block_size = 64, 
+ 
+        // Cacheline size, assumes power of two. 
+        cacheline_size = 64 
+ 
+    }; 
+ 
+#if __cplusplus >= 201103L 
+    template<class T> struct is_default_compare : std::false_type { }; 
+    template<class T> struct is_default_compare<std::less<T>> : std::true_type { }; 
+    template<class T> struct is_default_compare<std::greater<T>> : std::true_type { }; 
+#endif 
+ 
+    // Returns floor(log2(n)), assumes n > 0. 
+    template<class T> 
+    inline int log2(T n) { 
+        int log = 0; 
+        while (n >>= 1) ++log; 
+        return log; 
+    } 
+ 
+    // Sorts [begin, end) using insertion sort with the given comparison function. 
+    template<class Iter, class Compare> 
+    inline void insertion_sort(Iter begin, Iter end, Compare comp) { 
+        typedef typename std::iterator_traits<Iter>::value_type T; 
+        if (begin == end) return; 
+ 
+        for (Iter cur = begin + 1; cur != end; ++cur) { 
+            Iter sift = cur; 
+            Iter sift_1 = cur - 1; 
+ 
+            // Compare first so we can avoid 2 moves for an element already positioned correctly. 
+            if (comp(*sift, *sift_1)) { 
+                T tmp = PDQSORT_PREFER_MOVE(*sift); 
+ 
+                do { *sift-- = PDQSORT_PREFER_MOVE(*sift_1); } 
+                while (sift != begin && comp(tmp, *--sift_1)); 
+ 
+                *sift = PDQSORT_PREFER_MOVE(tmp); 
+            } 
+        } 
+    } 
+ 
+    // Sorts [begin, end) using insertion sort with the given comparison function. Assumes 
+    // *(begin - 1) is an element smaller than or equal to any element in [begin, end). 
+    template<class Iter, class Compare> 
+    inline void unguarded_insertion_sort(Iter begin, Iter end, Compare comp) { 
+        typedef typename std::iterator_traits<Iter>::value_type T; 
+        if (begin == end) return; 
+ 
+        for (Iter cur = begin + 1; cur != end; ++cur) { 
+            Iter sift = cur; 
+            Iter sift_1 = cur - 1; 
+ 
+            // Compare first so we can avoid 2 moves for an element already positioned correctly. 
+            if (comp(*sift, *sift_1)) { 
+                T tmp = PDQSORT_PREFER_MOVE(*sift); 
+ 
+                do { *sift-- = PDQSORT_PREFER_MOVE(*sift_1); } 
+                while (comp(tmp, *--sift_1)); 
+ 
+                *sift = PDQSORT_PREFER_MOVE(tmp); 
+            } 
+        } 
+    } 
+ 
+    // Attempts to use insertion sort on [begin, end). Will return false if more than 
+    // partial_insertion_sort_limit elements were moved, and abort sorting. Otherwise it will 
+    // successfully sort and return true. 
+    template<class Iter, class Compare> 
+    inline bool partial_insertion_sort(Iter begin, Iter end, Compare comp) { 
+        typedef typename std::iterator_traits<Iter>::value_type T; 
+        if (begin == end) return true; 
+         
+        std::size_t limit = 0; 
+        for (Iter cur = begin + 1; cur != end; ++cur) { 
+            Iter sift = cur; 
+            Iter sift_1 = cur - 1; 
+ 
+            // Compare first so we can avoid 2 moves for an element already positioned correctly. 
+            if (comp(*sift, *sift_1)) { 
+                T tmp = PDQSORT_PREFER_MOVE(*sift); 
+ 
+                do { *sift-- = PDQSORT_PREFER_MOVE(*sift_1); } 
+                while (sift != begin && comp(tmp, *--sift_1)); 
+ 
+                *sift = PDQSORT_PREFER_MOVE(tmp); 
+                limit += cur - sift; 
+            } 
+             
+            if (limit > partial_insertion_sort_limit) return false; 
+        } 
+ 
+        return true; 
+    } 
+ 
+    template<class Iter, class Compare> 
+    inline void sort2(Iter a, Iter b, Compare comp) { 
+        if (comp(*b, *a)) std::iter_swap(a, b); 
+    } 
+ 
+    // Sorts the elements *a, *b and *c using comparison function comp. 
+    template<class Iter, class Compare> 
+    inline void sort3(Iter a, Iter b, Iter c, Compare comp) { 
+        sort2(a, b, comp); 
+        sort2(b, c, comp); 
+        sort2(a, b, comp); 
+    } 
+ 
+    template<class T> 
+    inline T* align_cacheline(T* p) { 
+#if defined(UINTPTR_MAX) && __cplusplus >= 201103L 
+        std::uintptr_t ip = reinterpret_cast<std::uintptr_t>(p); 
+#else 
+        std::size_t ip = reinterpret_cast<std::size_t>(p); 
+#endif 
+        ip = (ip + cacheline_size - 1) & -cacheline_size; 
+        return reinterpret_cast<T*>(ip); 
+    } 
+ 
+    template<class Iter> 
+    inline void swap_offsets(Iter first, Iter last, 
+                             unsigned char* offsets_l, unsigned char* offsets_r, 
+                             int num, bool use_swaps) { 
+        typedef typename std::iterator_traits<Iter>::value_type T; 
+        if (use_swaps) { 
+            // This case is needed for the descending distribution, where we need 
+            // to have proper swapping for pdqsort to remain O(n). 
+            for (int i = 0; i < num; ++i) { 
+                std::iter_swap(first + offsets_l[i], last - offsets_r[i]); 
+            } 
+        } else if (num > 0) { 
+            Iter l = first + offsets_l[0]; Iter r = last - offsets_r[0]; 
+            T tmp(PDQSORT_PREFER_MOVE(*l)); *l = PDQSORT_PREFER_MOVE(*r); 
+            for (int i = 1; i < num; ++i) { 
+                l = first + offsets_l[i]; *r = PDQSORT_PREFER_MOVE(*l); 
+                r = last - offsets_r[i]; *l = PDQSORT_PREFER_MOVE(*r); 
+            } 
+            *r = PDQSORT_PREFER_MOVE(tmp); 
+        } 
+    } 
+ 
+    // Partitions [begin, end) around pivot *begin using comparison function comp. Elements equal 
+    // to the pivot are put in the right-hand partition. Returns the position of the pivot after 
+    // partitioning and whether the passed sequence already was correctly partitioned. Assumes the 
+    // pivot is a median of at least 3 elements and that [begin, end) is at least 
+    // insertion_sort_threshold long. Uses branchless partitioning. 
+    template<class Iter, class Compare> 
+    inline std::pair<Iter, bool> partition_right_branchless(Iter begin, Iter end, Compare comp) { 
+        typedef typename std::iterator_traits<Iter>::value_type T; 
+ 
+        // Move pivot into local for speed. 
+        T pivot(PDQSORT_PREFER_MOVE(*begin)); 
+        Iter first = begin; 
+        Iter last = end; 
+ 
+        // Find the first element greater than or equal than the pivot (the median of 3 guarantees 
+        // this exists). 
+        while (comp(*++first, pivot)); 
+ 
+        // Find the first element strictly smaller than the pivot. We have to guard this search if 
+        // there was no element before *first. 
+        if (first - 1 == begin) while (first < last && !comp(*--last, pivot)); 
+        else                    while (                !comp(*--last, pivot)); 
+ 
+        // If the first pair of elements that should be swapped to partition are the same element, 
+        // the passed in sequence already was correctly partitioned. 
+        bool already_partitioned = first >= last; 
+        if (!already_partitioned) { 
+            std::iter_swap(first, last); 
+            ++first; 
+        } 
+ 
+        // The following branchless partitioning is derived from "BlockQuicksort: How Branch 
+        // Mispredictions don’t affect Quicksort" by Stefan Edelkamp and Armin Weiss. 
+        unsigned char offsets_l_storage[block_size + cacheline_size]; 
+        unsigned char offsets_r_storage[block_size + cacheline_size]; 
+        unsigned char* offsets_l = align_cacheline(offsets_l_storage); 
+        unsigned char* offsets_r = align_cacheline(offsets_r_storage); 
+        int num_l, num_r, start_l, start_r; 
+        num_l = num_r = start_l = start_r = 0; 
+         
+        while (last - first > 2 * block_size) { 
+            // Fill up offset blocks with elements that are on the wrong side. 
+            if (num_l == 0) { 
+                start_l = 0; 
+                Iter it = first; 
+                for (unsigned char i = 0; i < block_size;) { 
+                    offsets_l[num_l] = i++; num_l += !comp(*it, pivot); ++it; 
+                    offsets_l[num_l] = i++; num_l += !comp(*it, pivot); ++it; 
+                    offsets_l[num_l] = i++; num_l += !comp(*it, pivot); ++it; 
+                    offsets_l[num_l] = i++; num_l += !comp(*it, pivot); ++it; 
+                    offsets_l[num_l] = i++; num_l += !comp(*it, pivot); ++it; 
+                    offsets_l[num_l] = i++; num_l += !comp(*it, pivot); ++it; 
+                    offsets_l[num_l] = i++; num_l += !comp(*it, pivot); ++it; 
+                    offsets_l[num_l] = i++; num_l += !comp(*it, pivot); ++it; 
+                } 
+            } 
+            if (num_r == 0) { 
+                start_r = 0; 
+                Iter it = last; 
+                for (unsigned char i = 0; i < block_size;) { 
+                    offsets_r[num_r] = ++i; num_r += comp(*--it, pivot); 
+                    offsets_r[num_r] = ++i; num_r += comp(*--it, pivot); 
+                    offsets_r[num_r] = ++i; num_r += comp(*--it, pivot); 
+                    offsets_r[num_r] = ++i; num_r += comp(*--it, pivot); 
+                    offsets_r[num_r] = ++i; num_r += comp(*--it, pivot); 
+                    offsets_r[num_r] = ++i; num_r += comp(*--it, pivot); 
+                    offsets_r[num_r] = ++i; num_r += comp(*--it, pivot); 
+                    offsets_r[num_r] = ++i; num_r += comp(*--it, pivot); 
+                } 
+            } 
+ 
+            // Swap elements and update block sizes and first/last boundaries. 
+            int num = std::min(num_l, num_r); 
+            swap_offsets(first, last, offsets_l + start_l, offsets_r + start_r, 
+                         num, num_l == num_r); 
+            num_l -= num; num_r -= num; 
+            start_l += num; start_r += num; 
+            if (num_l == 0) first += block_size; 
+            if (num_r == 0) last -= block_size; 
+        } 
+ 
+        int l_size = 0, r_size = 0; 
+        int unknown_left = (int)(last - first) - ((num_r || num_l) ? block_size : 0); 
+        if (num_r) { 
+            // Handle leftover block by assigning the unknown elements to the other block. 
+            l_size = unknown_left; 
+            r_size = block_size; 
+        } else if (num_l) { 
+            l_size = block_size; 
+            r_size = unknown_left; 
+        } else { 
+            // No leftover block, split the unknown elements in two blocks. 
+            l_size = unknown_left/2; 
+            r_size = unknown_left - l_size; 
+        } 
+ 
+        // Fill offset buffers if needed. 
+        if (unknown_left && !num_l) { 
+            start_l = 0; 
+            Iter it = first; 
+            for (unsigned char i = 0; i < l_size;) { 
+                offsets_l[num_l] = i++; num_l += !comp(*it, pivot); ++it; 
+            } 
+        } 
+        if (unknown_left && !num_r) { 
+            start_r = 0; 
+            Iter it = last; 
+            for (unsigned char i = 0; i < r_size;) { 
+                offsets_r[num_r] = ++i; num_r += comp(*--it, pivot); 
+            } 
+        } 
+ 
+        int num = std::min(num_l, num_r); 
+        swap_offsets(first, last, offsets_l + start_l, offsets_r + start_r, num, num_l == num_r); 
+        num_l -= num; num_r -= num; 
+        start_l += num; start_r += num; 
+        if (num_l == 0) first += l_size; 
+        if (num_r == 0) last -= r_size; 
+         
+        // We have now fully identified [first, last)'s proper position. Swap the last elements. 
+        if (num_l) { 
+            offsets_l += start_l; 
+            while (num_l--) std::iter_swap(first + offsets_l[num_l], --last); 
+            first = last; 
+        } 
+        if (num_r) { 
+            offsets_r += start_r; 
+            while (num_r--) std::iter_swap(last - offsets_r[num_r], first), ++first; 
+            last = first; 
+        } 
+ 
+        // Put the pivot in the right place. 
+        Iter pivot_pos = first - 1; 
+        *begin = PDQSORT_PREFER_MOVE(*pivot_pos); 
+        *pivot_pos = PDQSORT_PREFER_MOVE(pivot); 
+ 
+        return std::make_pair(pivot_pos, already_partitioned); 
+    } 
+ 
+    // Partitions [begin, end) around pivot *begin using comparison function comp. Elements equal 
+    // to the pivot are put in the right-hand partition. Returns the position of the pivot after 
+    // partitioning and whether the passed sequence already was correctly partitioned. Assumes the 
+    // pivot is a median of at least 3 elements and that [begin, end) is at least 
+    // insertion_sort_threshold long. 
+    template<class Iter, class Compare> 
+    inline std::pair<Iter, bool> partition_right(Iter begin, Iter end, Compare comp) { 
+        typedef typename std::iterator_traits<Iter>::value_type T; 
+         
+        // Move pivot into local for speed. 
+        T pivot(PDQSORT_PREFER_MOVE(*begin)); 
+ 
+        Iter first = begin; 
+        Iter last = end; 
+ 
+        // Find the first element greater than or equal than the pivot (the median of 3 guarantees 
+        // this exists). 
+        while (comp(*++first, pivot)); 
+ 
+        // Find the first element strictly smaller than the pivot. We have to guard this search if 
+        // there was no element before *first. 
+        if (first - 1 == begin) while (first < last && !comp(*--last, pivot)); 
+        else                    while (                !comp(*--last, pivot)); 
+ 
+        // If the first pair of elements that should be swapped to partition are the same element, 
+        // the passed in sequence already was correctly partitioned. 
+        bool already_partitioned = first >= last; 
+         
+        // Keep swapping pairs of elements that are on the wrong side of the pivot. Previously 
+        // swapped pairs guard the searches, which is why the first iteration is special-cased 
+        // above. 
+        while (first < last) { 
+            std::iter_swap(first, last); 
+            while (comp(*++first, pivot)); 
+            while (!comp(*--last, pivot)); 
+        } 
+ 
+        // Put the pivot in the right place. 
+        Iter pivot_pos = first - 1; 
+        *begin = PDQSORT_PREFER_MOVE(*pivot_pos); 
+        *pivot_pos = PDQSORT_PREFER_MOVE(pivot); 
+ 
+        return std::make_pair(pivot_pos, already_partitioned); 
+    } 
+ 
+    // Similar function to the one above, except elements equal to the pivot are put to the left of 
+    // the pivot and it doesn't check or return if the passed sequence already was partitioned. 
+    // Since this is rarely used (the many equal case), and in that case pdqsort already has O(n) 
+    // performance, no block quicksort is applied here for simplicity. 
+    template<class Iter, class Compare> 
+    inline Iter partition_left(Iter begin, Iter end, Compare comp) { 
+        typedef typename std::iterator_traits<Iter>::value_type T; 
+ 
+        T pivot(PDQSORT_PREFER_MOVE(*begin)); 
+        Iter first = begin; 
+        Iter last = end; 
+         
+        while (comp(pivot, *--last)); 
+ 
+        if (last + 1 == end) while (first < last && !comp(pivot, *++first)); 
+        else                 while (                !comp(pivot, *++first)); 
+ 
+        while (first < last) { 
+            std::iter_swap(first, last); 
+            while (comp(pivot, *--last)); 
+            while (!comp(pivot, *++first)); 
+        } 
+ 
+        Iter pivot_pos = last; 
+        *begin = PDQSORT_PREFER_MOVE(*pivot_pos); 
+        *pivot_pos = PDQSORT_PREFER_MOVE(pivot); 
+ 
+        return pivot_pos; 
+    } 
+ 
+ 
+    template<class Iter, class Compare, bool Branchless> 
+    inline void pdqsort_loop(Iter begin, Iter end, Compare comp, int bad_allowed, bool leftmost = true) { 
+        typedef typename std::iterator_traits<Iter>::difference_type diff_t; 
+ 
+        // Use a while loop for tail recursion elimination. 
+        while (true) { 
+            diff_t size = end - begin; 
+ 
+            // Insertion sort is faster for small arrays. 
+            if (size < insertion_sort_threshold) { 
+                if (leftmost) insertion_sort(begin, end, comp); 
+                else unguarded_insertion_sort(begin, end, comp); 
+                return; 
+            } 
+ 
+            // Choose pivot as median of 3 or pseudomedian of 9. 
+            diff_t s2 = size / 2; 
+            if (size > ninther_threshold) { 
+                sort3(begin, begin + s2, end - 1, comp); 
+                sort3(begin + 1, begin + (s2 - 1), end - 2, comp); 
+                sort3(begin + 2, begin + (s2 + 1), end - 3, comp); 
+                sort3(begin + (s2 - 1), begin + s2, begin + (s2 + 1), comp); 
+                std::iter_swap(begin, begin + s2); 
+            } else sort3(begin + s2, begin, end - 1, comp); 
+ 
+            // If *(begin - 1) is the end of the right partition of a previous partition operation 
+            // there is no element in [begin, end) that is smaller than *(begin - 1). Then if our 
+            // pivot compares equal to *(begin - 1) we change strategy, putting equal elements in 
+            // the left partition, greater elements in the right partition. We do not have to 
+            // recurse on the left partition, since it's sorted (all equal). 
+            if (!leftmost && !comp(*(begin - 1), *begin)) { 
+                begin = partition_left(begin, end, comp) + 1; 
+                continue; 
+            } 
+ 
+            // Partition and get results. 
+            std::pair<Iter, bool> part_result = 
+                Branchless ? partition_right_branchless(begin, end, comp) 
+                           : partition_right(begin, end, comp); 
+            Iter pivot_pos = part_result.first; 
+            bool already_partitioned = part_result.second; 
+ 
+            // Check for a highly unbalanced partition. 
+            diff_t l_size = pivot_pos - begin; 
+            diff_t r_size = end - (pivot_pos + 1); 
+            bool highly_unbalanced = l_size < size / 8 || r_size < size / 8; 
+ 
+            // If we got a highly unbalanced partition we shuffle elements to break many patterns. 
+            if (highly_unbalanced) { 
+                // If we had too many bad partitions, switch to heapsort to guarantee O(n log n). 
+                if (--bad_allowed == 0) { 
+                    std::make_heap(begin, end, comp); 
+                    std::sort_heap(begin, end, comp); 
+                    return; 
+                } 
+ 
+                if (l_size >= insertion_sort_threshold) { 
+                    std::iter_swap(begin,             begin + l_size / 4); 
+                    std::iter_swap(pivot_pos - 1, pivot_pos - l_size / 4); 
+ 
+                    if (l_size > ninther_threshold) { 
+                        std::iter_swap(begin + 1,         begin + (l_size / 4 + 1)); 
+                        std::iter_swap(begin + 2,         begin + (l_size / 4 + 2)); 
+                        std::iter_swap(pivot_pos - 2, pivot_pos - (l_size / 4 + 1)); 
+                        std::iter_swap(pivot_pos - 3, pivot_pos - (l_size / 4 + 2)); 
+                    } 
+                } 
+                 
+                if (r_size >= insertion_sort_threshold) { 
+                    std::iter_swap(pivot_pos + 1, pivot_pos + (1 + r_size / 4)); 
+                    std::iter_swap(end - 1,                   end - r_size / 4); 
+                     
+                    if (r_size > ninther_threshold) { 
+                        std::iter_swap(pivot_pos + 2, pivot_pos + (2 + r_size / 4)); 
+                        std::iter_swap(pivot_pos + 3, pivot_pos + (3 + r_size / 4)); 
+                        std::iter_swap(end - 2,             end - (1 + r_size / 4)); 
+                        std::iter_swap(end - 3,             end - (2 + r_size / 4)); 
+                    } 
+                } 
+            } else { 
+                // If we were decently balanced and we tried to sort an already partitioned 
+                // sequence try to use insertion sort. 
+                if (already_partitioned && partial_insertion_sort(begin, pivot_pos, comp) 
+                                        && partial_insertion_sort(pivot_pos + 1, end, comp)) return; 
+            } 
+                 
+            // Sort the left partition first using recursion and do tail recursion elimination for 
+            // the right-hand partition. 
+            pdqsort_loop<Iter, Compare, Branchless>(begin, pivot_pos, comp, bad_allowed, leftmost); 
+            begin = pivot_pos + 1; 
+            leftmost = false; 
+        } 
+    } 
+} 
+ 
+ 
+template<class Iter, class Compare> 
+inline void pdqsort(Iter begin, Iter end, Compare comp) { 
+    if (begin == end) return; 
+ 
+#if __cplusplus >= 201103L 
+    pdqsort_detail::pdqsort_loop<Iter, Compare, 
+        pdqsort_detail::is_default_compare<typename std::decay<Compare>::type>::value && 
+        std::is_arithmetic<typename std::iterator_traits<Iter>::value_type>::value>( 
+        begin, end, comp, pdqsort_detail::log2(end - begin)); 
+#else 
+    pdqsort_detail::pdqsort_loop<Iter, Compare, false>( 
+        begin, end, comp, pdqsort_detail::log2(end - begin)); 
+#endif 
+} 
+ 
// Convenience overload: sorts [begin, end) in ascending order via std::less.
template<class Iter>
inline void pdqsort(Iter begin, Iter end) {
    typedef typename std::iterator_traits<Iter>::value_type value_t;
    pdqsort(begin, end, std::less<value_t>());
}
+ 
+template<class Iter, class Compare> 
+inline void pdqsort_branchless(Iter begin, Iter end, Compare comp) { 
+    if (begin == end) return; 
+    pdqsort_detail::pdqsort_loop<Iter, Compare, true>( 
+        begin, end, comp, pdqsort_detail::log2(end - begin)); 
+} 
+ 
// Convenience overload: branchless pdqsort in ascending order via std::less.
template<class Iter>
inline void pdqsort_branchless(Iter begin, Iter end) {
    typedef typename std::iterator_traits<Iter>::value_type value_t;
    pdqsort_branchless(begin, end, std::less<value_t>());
}
+ 
+ 
+#undef PDQSORT_PREFER_MOVE 
+ 
+#endif 

+ 11 - 11
contrib/libs/pdqsort/ya.make

@@ -2,19 +2,19 @@ OWNER(
     g:cpp-contrib
     g:clickhouse
 )
-
-# Origin: https://github.com/orlp/pdqsort
-
-LIBRARY()
-
+ 
+# Origin: https://github.com/orlp/pdqsort 
+ 
+LIBRARY() 
+ 
 LICENSE(Zlib)
 
 LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
 
-VERSION(978bc36a9bd4143a54b2551cfd9ce8a6afd6d04c)
-
-NO_UTIL()
-
-NO_RUNTIME()
+VERSION(978bc36a9bd4143a54b2551cfd9ce8a6afd6d04c) 
+ 
+NO_UTIL() 
 
-END()
+NO_RUNTIME() 
+ 
+END() 

+ 2 - 2
contrib/libs/ya.make

@@ -141,7 +141,7 @@ RECURSE(
     libcpuid
     libcroco
     libde265
-    libdivide
+    libdivide 
     libdivsufsort2
     libeatmydata/dynamic
     libev
@@ -265,7 +265,7 @@ RECURSE(
     pango
     pcre
     pcre2
-    pdqsort
+    pdqsort 
     pffft
     pfr
     picohttpparser

+ 24 - 24
contrib/python/Pygments/py2/LICENSE

@@ -1,25 +1,25 @@
 Copyright (c) 2006-2019 by the respective authors (see AUTHORS file).
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-* Redistributions of source code must retain the above copyright
-  notice, this list of conditions and the following disclaimer.
-
-* Redistributions in binary form must reproduce the above copyright
-  notice, this list of conditions and the following disclaimer in the
-  documentation and/or other materials provided with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+All rights reserved. 
+ 
+Redistribution and use in source and binary forms, with or without 
+modification, are permitted provided that the following conditions are 
+met: 
+ 
+* Redistributions of source code must retain the above copyright 
+  notice, this list of conditions and the following disclaimer. 
+ 
+* Redistributions in binary form must reproduce the above copyright 
+  notice, this list of conditions and the following disclaimer in the 
+  documentation and/or other materials provided with the distribution. 
+ 
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 

+ 76 - 76
contrib/python/Pygments/py2/pygments/__init__.py

@@ -1,90 +1,90 @@
-# -*- coding: utf-8 -*-
-"""
-    Pygments
-    ~~~~~~~~
-
-    Pygments is a syntax highlighting package written in Python.
-
-    It is a generic syntax highlighter for general use in all kinds of software
-    such as forum systems, wikis or other applications that need to prettify
-    source code. Highlights are:
-
-    * a wide range of common languages and markup formats is supported
-    * special attention is paid to details, increasing quality by a fair amount
-    * support for new languages and formats are added easily
-    * a number of output formats, presently HTML, LaTeX, RTF, SVG, all image
-      formats that PIL supports, and ANSI sequences
-    * it is usable as a command-line tool and as a library
-    * ... and it highlights even Brainfuck!
-
+# -*- coding: utf-8 -*- 
+""" 
+    Pygments 
+    ~~~~~~~~ 
+ 
+    Pygments is a syntax highlighting package written in Python. 
+ 
+    It is a generic syntax highlighter for general use in all kinds of software 
+    such as forum systems, wikis or other applications that need to prettify 
+    source code. Highlights are: 
+ 
+    * a wide range of common languages and markup formats is supported 
+    * special attention is paid to details, increasing quality by a fair amount 
+    * support for new languages and formats are added easily 
+    * a number of output formats, presently HTML, LaTeX, RTF, SVG, all image 
+      formats that PIL supports, and ANSI sequences 
+    * it is usable as a command-line tool and as a library 
+    * ... and it highlights even Brainfuck! 
+ 
     The `Pygments master branch`_ is installable with ``easy_install Pygments==dev``.
-
+ 
     .. _Pygments master branch:
        https://github.com/pygments/pygments/archive/master.zip#egg=Pygments-dev
-
+ 
     :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
-    :license: BSD, see LICENSE for details.
-"""
+    :license: BSD, see LICENSE for details. 
+""" 
 import sys
-
+ 
 from pygments.util import StringIO, BytesIO
 
 __version__ = '2.5.2'
-__docformat__ = 'restructuredtext'
-
-__all__ = ['lex', 'format', 'highlight']
-
-
-def lex(code, lexer):
-    """
-    Lex ``code`` with ``lexer`` and return an iterable of tokens.
-    """
-    try:
-        return lexer.get_tokens(code)
-    except TypeError as err:
+__docformat__ = 'restructuredtext' 
+ 
+__all__ = ['lex', 'format', 'highlight'] 
+ 
+ 
+def lex(code, lexer): 
+    """ 
+    Lex ``code`` with ``lexer`` and return an iterable of tokens. 
+    """ 
+    try: 
+        return lexer.get_tokens(code) 
+    except TypeError as err: 
         if (isinstance(err.args[0], str) and
             ('unbound method get_tokens' in err.args[0] or
              'missing 1 required positional argument' in err.args[0])):
-            raise TypeError('lex() argument must be a lexer instance, '
-                            'not a class')
-        raise
-
-
-def format(tokens, formatter, outfile=None):  # pylint: disable=redefined-builtin
-    """
-    Format a tokenlist ``tokens`` with the formatter ``formatter``.
-
-    If ``outfile`` is given and a valid file object (an object
-    with a ``write`` method), the result will be written to it, otherwise
-    it is returned as a string.
-    """
-    try:
-        if not outfile:
-            realoutfile = getattr(formatter, 'encoding', None) and BytesIO() or StringIO()
-            formatter.format(tokens, realoutfile)
-            return realoutfile.getvalue()
-        else:
-            formatter.format(tokens, outfile)
-    except TypeError as err:
+            raise TypeError('lex() argument must be a lexer instance, ' 
+                            'not a class') 
+        raise 
+ 
+ 
+def format(tokens, formatter, outfile=None):  # pylint: disable=redefined-builtin 
+    """ 
+    Format a tokenlist ``tokens`` with the formatter ``formatter``. 
+ 
+    If ``outfile`` is given and a valid file object (an object 
+    with a ``write`` method), the result will be written to it, otherwise 
+    it is returned as a string. 
+    """ 
+    try: 
+        if not outfile: 
+            realoutfile = getattr(formatter, 'encoding', None) and BytesIO() or StringIO() 
+            formatter.format(tokens, realoutfile) 
+            return realoutfile.getvalue() 
+        else: 
+            formatter.format(tokens, outfile) 
+    except TypeError as err: 
         if (isinstance(err.args[0], str) and
             ('unbound method format' in err.args[0] or
              'missing 1 required positional argument' in err.args[0])):
-            raise TypeError('format() argument must be a formatter instance, '
-                            'not a class')
-        raise
-
-
-def highlight(code, lexer, formatter, outfile=None):
-    """
-    Lex ``code`` with ``lexer`` and format it with the formatter ``formatter``.
-
-    If ``outfile`` is given and a valid file object (an object
-    with a ``write`` method), the result will be written to it, otherwise
-    it is returned as a string.
-    """
-    return format(lex(code, lexer), formatter, outfile)
-
-
-if __name__ == '__main__':  # pragma: no cover
-    from pygments.cmdline import main
-    sys.exit(main(sys.argv))
+            raise TypeError('format() argument must be a formatter instance, ' 
+                            'not a class') 
+        raise 
+ 
+ 
+def highlight(code, lexer, formatter, outfile=None): 
+    """ 
+    Lex ``code`` with ``lexer`` and format it with the formatter ``formatter``. 
+ 
+    If ``outfile`` is given and a valid file object (an object 
+    with a ``write`` method), the result will be written to it, otherwise 
+    it is returned as a string. 
+    """ 
+    return format(lex(code, lexer), formatter, outfile) 
+ 
+ 
+if __name__ == '__main__':  # pragma: no cover 
+    from pygments.cmdline import main 
+    sys.exit(main(sys.argv)) 

+ 511 - 511
contrib/python/Pygments/py2/pygments/cmdline.py

@@ -1,64 +1,64 @@
-# -*- coding: utf-8 -*-
-"""
-    pygments.cmdline
-    ~~~~~~~~~~~~~~~~
-
-    Command line interface.
-
+# -*- coding: utf-8 -*- 
+""" 
+    pygments.cmdline 
+    ~~~~~~~~~~~~~~~~ 
+ 
+    Command line interface. 
+ 
     :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
-    :license: BSD, see LICENSE for details.
-"""
-
-from __future__ import print_function
-
+    :license: BSD, see LICENSE for details. 
+""" 
+ 
+from __future__ import print_function 
+ 
 import os
-import sys
-import getopt
-from textwrap import dedent
-
-from pygments import __version__, highlight
-from pygments.util import ClassNotFound, OptionError, docstring_headline, \
-    guess_decode, guess_decode_from_terminal, terminal_encoding
-from pygments.lexers import get_all_lexers, get_lexer_by_name, guess_lexer, \
+import sys 
+import getopt 
+from textwrap import dedent 
+ 
+from pygments import __version__, highlight 
+from pygments.util import ClassNotFound, OptionError, docstring_headline, \ 
+    guess_decode, guess_decode_from_terminal, terminal_encoding 
+from pygments.lexers import get_all_lexers, get_lexer_by_name, guess_lexer, \ 
     load_lexer_from_file, get_lexer_for_filename, find_lexer_class_for_filename
-from pygments.lexers.special import TextLexer
-from pygments.formatters.latex import LatexEmbeddedLexer, LatexFormatter
-from pygments.formatters import get_all_formatters, get_formatter_by_name, \
+from pygments.lexers.special import TextLexer 
+from pygments.formatters.latex import LatexEmbeddedLexer, LatexFormatter 
+from pygments.formatters import get_all_formatters, get_formatter_by_name, \ 
     load_formatter_from_file, get_formatter_for_filename, find_formatter_class
-from pygments.formatters.terminal import TerminalFormatter
+from pygments.formatters.terminal import TerminalFormatter 
 from pygments.formatters.terminal256 import Terminal256Formatter
-from pygments.filters import get_all_filters, find_filter_class
-from pygments.styles import get_all_styles, get_style_by_name
-
-
-USAGE = """\
-Usage: %s [-l <lexer> | -g] [-F <filter>[:<options>]] [-f <formatter>]
+from pygments.filters import get_all_filters, find_filter_class 
+from pygments.styles import get_all_styles, get_style_by_name 
+ 
+ 
+USAGE = """\ 
+Usage: %s [-l <lexer> | -g] [-F <filter>[:<options>]] [-f <formatter>] 
           [-O <options>] [-P <option=value>] [-s] [-v] [-x] [-o <outfile>] [<infile>]
-
-       %s -S <style> -f <formatter> [-a <arg>] [-O <options>] [-P <option=value>]
-       %s -L [<which> ...]
-       %s -N <filename>
-       %s -H <type> <name>
-       %s -h | -V
-
-Highlight the input file and write the result to <outfile>.
-
-If no input file is given, use stdin, if -o is not given, use stdout.
-
-If -s is passed, lexing will be done in "streaming" mode, reading and
-highlighting one line at a time.  This will only work properly with
-lexers that have no constructs spanning multiple lines!
-
-<lexer> is a lexer name (query all lexer names with -L). If -l is not
-given, the lexer is guessed from the extension of the input file name
-(this obviously doesn't work if the input is stdin).  If -g is passed,
-attempt to guess the lexer from the file contents, or pass through as
-plain text if this fails (this can work for stdin).
-
-Likewise, <formatter> is a formatter name, and will be guessed from
-the extension of the output file name. If no output file is given,
-the terminal formatter will be used by default.
-
+ 
+       %s -S <style> -f <formatter> [-a <arg>] [-O <options>] [-P <option=value>] 
+       %s -L [<which> ...] 
+       %s -N <filename> 
+       %s -H <type> <name> 
+       %s -h | -V 
+ 
+Highlight the input file and write the result to <outfile>. 
+ 
+If no input file is given, use stdin, if -o is not given, use stdout. 
+ 
+If -s is passed, lexing will be done in "streaming" mode, reading and 
+highlighting one line at a time.  This will only work properly with 
+lexers that have no constructs spanning multiple lines! 
+ 
+<lexer> is a lexer name (query all lexer names with -L). If -l is not 
+given, the lexer is guessed from the extension of the input file name 
+(this obviously doesn't work if the input is stdin).  If -g is passed, 
+attempt to guess the lexer from the file contents, or pass through as 
+plain text if this fails (this can work for stdin). 
+ 
+Likewise, <formatter> is a formatter name, and will be guessed from 
+the extension of the output file name. If no output file is given, 
+the terminal formatter will be used by default. 
+ 
 The additional option -x allows custom lexers and formatters to be
 loaded from a .py file relative to the current working directory. For
 example, ``-l ./customlexer.py -x``. By default, this option expects a
@@ -67,274 +67,274 @@ specify your own class name with a colon (``-l ./lexer.py:MyLexer``).
 Users should be very careful not to use this option with untrusted files,
 because it will import and run them.
 
-With the -O option, you can give the lexer and formatter a comma-
-separated list of options, e.g. ``-O bg=light,python=cool``.
-
-The -P option adds lexer and formatter options like the -O option, but
-you can only give one option per -P. That way, the option value may
-contain commas and equals signs, which it can't with -O, e.g.
-``-P "heading=Pygments, the Python highlighter".
-
-With the -F option, you can add filters to the token stream, you can
-give options in the same way as for -O after a colon (note: there must
-not be spaces around the colon).
-
-The -O, -P and -F options can be given multiple times.
-
-With the -S option, print out style definitions for style <style>
-for formatter <formatter>. The argument given by -a is formatter
-dependent.
-
-The -L option lists lexers, formatters, styles or filters -- set
-`which` to the thing you want to list (e.g. "styles"), or omit it to
-list everything.
-
-The -N option guesses and prints out a lexer name based solely on
-the given filename. It does not take input or highlight anything.
-If no specific lexer can be determined "text" is returned.
-
-The -H option prints detailed help for the object <name> of type <type>,
-where <type> is one of "lexer", "formatter" or "filter".
-
-The -s option processes lines one at a time until EOF, rather than
-waiting to process the entire file.  This only works for stdin, and
-is intended for streaming input such as you get from 'tail -f'.
-Example usage: "tail -f sql.log | pygmentize -s -l sql"
-
-The -v option prints a detailed traceback on unhandled exceptions,
-which is useful for debugging and bug reports.
-
-The -h option prints this help.
-The -V option prints the package version.
-"""
-
-
-def _parse_options(o_strs):
-    opts = {}
-    if not o_strs:
-        return opts
-    for o_str in o_strs:
-        if not o_str.strip():
-            continue
-        o_args = o_str.split(',')
-        for o_arg in o_args:
-            o_arg = o_arg.strip()
-            try:
-                o_key, o_val = o_arg.split('=', 1)
-                o_key = o_key.strip()
-                o_val = o_val.strip()
-            except ValueError:
-                opts[o_arg] = True
-            else:
-                opts[o_key] = o_val
-    return opts
-
-
-def _parse_filters(f_strs):
-    filters = []
-    if not f_strs:
-        return filters
-    for f_str in f_strs:
-        if ':' in f_str:
-            fname, fopts = f_str.split(':', 1)
-            filters.append((fname, _parse_options([fopts])))
-        else:
-            filters.append((f_str, {}))
-    return filters
-
-
-def _print_help(what, name):
-    try:
-        if what == 'lexer':
-            cls = get_lexer_by_name(name)
-            print("Help on the %s lexer:" % cls.name)
-            print(dedent(cls.__doc__))
-        elif what == 'formatter':
-            cls = find_formatter_class(name)
-            print("Help on the %s formatter:" % cls.name)
-            print(dedent(cls.__doc__))
-        elif what == 'filter':
-            cls = find_filter_class(name)
-            print("Help on the %s filter:" % name)
-            print(dedent(cls.__doc__))
-        return 0
-    except (AttributeError, ValueError):
-        print("%s not found!" % what, file=sys.stderr)
-        return 1
-
-
-def _print_list(what):
-    if what == 'lexer':
-        print()
-        print("Lexers:")
-        print("~~~~~~~")
-
-        info = []
-        for fullname, names, exts, _ in get_all_lexers():
-            tup = (', '.join(names)+':', fullname,
-                   exts and '(filenames ' + ', '.join(exts) + ')' or '')
-            info.append(tup)
-        info.sort()
-        for i in info:
-            print(('* %s\n    %s %s') % i)
-
-    elif what == 'formatter':
-        print()
-        print("Formatters:")
-        print("~~~~~~~~~~~")
-
-        info = []
-        for cls in get_all_formatters():
-            doc = docstring_headline(cls)
-            tup = (', '.join(cls.aliases) + ':', doc, cls.filenames and
-                   '(filenames ' + ', '.join(cls.filenames) + ')' or '')
-            info.append(tup)
-        info.sort()
-        for i in info:
-            print(('* %s\n    %s %s') % i)
-
-    elif what == 'filter':
-        print()
-        print("Filters:")
-        print("~~~~~~~~")
-
-        for name in get_all_filters():
-            cls = find_filter_class(name)
-            print("* " + name + ':')
-            print("    %s" % docstring_headline(cls))
-
-    elif what == 'style':
-        print()
-        print("Styles:")
-        print("~~~~~~~")
-
-        for name in get_all_styles():
-            cls = get_style_by_name(name)
-            print("* " + name + ':')
-            print("    %s" % docstring_headline(cls))
-
-
-def main_inner(popts, args, usage):
-    opts = {}
-    O_opts = []
-    P_opts = []
-    F_opts = []
-    for opt, arg in popts:
-        if opt == '-O':
-            O_opts.append(arg)
-        elif opt == '-P':
-            P_opts.append(arg)
-        elif opt == '-F':
-            F_opts.append(arg)
-        opts[opt] = arg
-
-    if opts.pop('-h', None) is not None:
-        print(usage)
-        return 0
-
-    if opts.pop('-V', None) is not None:
+With the -O option, you can give the lexer and formatter a comma- 
+separated list of options, e.g. ``-O bg=light,python=cool``. 
+ 
+The -P option adds lexer and formatter options like the -O option, but 
+you can only give one option per -P. That way, the option value may 
+contain commas and equals signs, which it can't with -O, e.g. 
+``-P "heading=Pygments, the Python highlighter". 
+ 
+With the -F option, you can add filters to the token stream, you can 
+give options in the same way as for -O after a colon (note: there must 
+not be spaces around the colon). 
+ 
+The -O, -P and -F options can be given multiple times. 
+ 
+With the -S option, print out style definitions for style <style> 
+for formatter <formatter>. The argument given by -a is formatter 
+dependent. 
+ 
+The -L option lists lexers, formatters, styles or filters -- set 
+`which` to the thing you want to list (e.g. "styles"), or omit it to 
+list everything. 
+ 
+The -N option guesses and prints out a lexer name based solely on 
+the given filename. It does not take input or highlight anything. 
+If no specific lexer can be determined "text" is returned. 
+ 
+The -H option prints detailed help for the object <name> of type <type>, 
+where <type> is one of "lexer", "formatter" or "filter". 
+ 
+The -s option processes lines one at a time until EOF, rather than 
+waiting to process the entire file.  This only works for stdin, and 
+is intended for streaming input such as you get from 'tail -f'. 
+Example usage: "tail -f sql.log | pygmentize -s -l sql" 
+ 
+The -v option prints a detailed traceback on unhandled exceptions, 
+which is useful for debugging and bug reports. 
+ 
+The -h option prints this help. 
+The -V option prints the package version. 
+""" 
+ 
+ 
+def _parse_options(o_strs): 
+    opts = {} 
+    if not o_strs: 
+        return opts 
+    for o_str in o_strs: 
+        if not o_str.strip(): 
+            continue 
+        o_args = o_str.split(',') 
+        for o_arg in o_args: 
+            o_arg = o_arg.strip() 
+            try: 
+                o_key, o_val = o_arg.split('=', 1) 
+                o_key = o_key.strip() 
+                o_val = o_val.strip() 
+            except ValueError: 
+                opts[o_arg] = True 
+            else: 
+                opts[o_key] = o_val 
+    return opts 
+ 
+ 
+def _parse_filters(f_strs): 
+    filters = [] 
+    if not f_strs: 
+        return filters 
+    for f_str in f_strs: 
+        if ':' in f_str: 
+            fname, fopts = f_str.split(':', 1) 
+            filters.append((fname, _parse_options([fopts]))) 
+        else: 
+            filters.append((f_str, {})) 
+    return filters 
+ 
+ 
+def _print_help(what, name): 
+    try: 
+        if what == 'lexer': 
+            cls = get_lexer_by_name(name) 
+            print("Help on the %s lexer:" % cls.name) 
+            print(dedent(cls.__doc__)) 
+        elif what == 'formatter': 
+            cls = find_formatter_class(name) 
+            print("Help on the %s formatter:" % cls.name) 
+            print(dedent(cls.__doc__)) 
+        elif what == 'filter': 
+            cls = find_filter_class(name) 
+            print("Help on the %s filter:" % name) 
+            print(dedent(cls.__doc__)) 
+        return 0 
+    except (AttributeError, ValueError): 
+        print("%s not found!" % what, file=sys.stderr) 
+        return 1 
+ 
+ 
+def _print_list(what): 
+    if what == 'lexer': 
+        print() 
+        print("Lexers:") 
+        print("~~~~~~~") 
+ 
+        info = [] 
+        for fullname, names, exts, _ in get_all_lexers(): 
+            tup = (', '.join(names)+':', fullname, 
+                   exts and '(filenames ' + ', '.join(exts) + ')' or '') 
+            info.append(tup) 
+        info.sort() 
+        for i in info: 
+            print(('* %s\n    %s %s') % i) 
+ 
+    elif what == 'formatter': 
+        print() 
+        print("Formatters:") 
+        print("~~~~~~~~~~~") 
+ 
+        info = [] 
+        for cls in get_all_formatters(): 
+            doc = docstring_headline(cls) 
+            tup = (', '.join(cls.aliases) + ':', doc, cls.filenames and 
+                   '(filenames ' + ', '.join(cls.filenames) + ')' or '') 
+            info.append(tup) 
+        info.sort() 
+        for i in info: 
+            print(('* %s\n    %s %s') % i) 
+ 
+    elif what == 'filter': 
+        print() 
+        print("Filters:") 
+        print("~~~~~~~~") 
+ 
+        for name in get_all_filters(): 
+            cls = find_filter_class(name) 
+            print("* " + name + ':') 
+            print("    %s" % docstring_headline(cls)) 
+ 
+    elif what == 'style': 
+        print() 
+        print("Styles:") 
+        print("~~~~~~~") 
+ 
+        for name in get_all_styles(): 
+            cls = get_style_by_name(name) 
+            print("* " + name + ':') 
+            print("    %s" % docstring_headline(cls)) 
+ 
+ 
+def main_inner(popts, args, usage): 
+    opts = {} 
+    O_opts = [] 
+    P_opts = [] 
+    F_opts = [] 
+    for opt, arg in popts: 
+        if opt == '-O': 
+            O_opts.append(arg) 
+        elif opt == '-P': 
+            P_opts.append(arg) 
+        elif opt == '-F': 
+            F_opts.append(arg) 
+        opts[opt] = arg 
+ 
+    if opts.pop('-h', None) is not None: 
+        print(usage) 
+        return 0 
+ 
+    if opts.pop('-V', None) is not None: 
         print('Pygments version %s, (c) 2006-2019 by Georg Brandl.' % __version__)
-        return 0
-
-    # handle ``pygmentize -L``
-    L_opt = opts.pop('-L', None)
-    if L_opt is not None:
-        if opts:
-            print(usage, file=sys.stderr)
-            return 2
-
-        # print version
-        main(['', '-V'])
-        if not args:
-            args = ['lexer', 'formatter', 'filter', 'style']
-        for arg in args:
-            _print_list(arg.rstrip('s'))
-        return 0
-
-    # handle ``pygmentize -H``
-    H_opt = opts.pop('-H', None)
-    if H_opt is not None:
-        if opts or len(args) != 2:
-            print(usage, file=sys.stderr)
-            return 2
-
-        what, name = args  # pylint: disable=unbalanced-tuple-unpacking
-        if what not in ('lexer', 'formatter', 'filter'):
-            print(usage, file=sys.stderr)
-            return 2
-
-        return _print_help(what, name)
-
-    # parse -O options
-    parsed_opts = _parse_options(O_opts)
-    opts.pop('-O', None)
-
-    # parse -P options
-    for p_opt in P_opts:
-        try:
-            name, value = p_opt.split('=', 1)
-        except ValueError:
-            parsed_opts[p_opt] = True
-        else:
-            parsed_opts[name] = value
-    opts.pop('-P', None)
-
-    # encodings
-    inencoding = parsed_opts.get('inencoding', parsed_opts.get('encoding'))
-    outencoding = parsed_opts.get('outencoding', parsed_opts.get('encoding'))
-
-    # handle ``pygmentize -N``
-    infn = opts.pop('-N', None)
-    if infn is not None:
-        lexer = find_lexer_class_for_filename(infn)
-        if lexer is None:
-            lexer = TextLexer
-
-        print(lexer.aliases[0])
-        return 0
-
-    # handle ``pygmentize -S``
-    S_opt = opts.pop('-S', None)
-    a_opt = opts.pop('-a', None)
-    if S_opt is not None:
-        f_opt = opts.pop('-f', None)
-        if not f_opt:
-            print(usage, file=sys.stderr)
-            return 2
-        if opts or args:
-            print(usage, file=sys.stderr)
-            return 2
-
-        try:
-            parsed_opts['style'] = S_opt
-            fmter = get_formatter_by_name(f_opt, **parsed_opts)
-        except ClassNotFound as err:
-            print(err, file=sys.stderr)
-            return 1
-
-        print(fmter.get_style_defs(a_opt or ''))
-        return 0
-
-    # if no -S is given, -a is not allowed
-    if a_opt is not None:
-        print(usage, file=sys.stderr)
-        return 2
-
-    # parse -F options
-    F_opts = _parse_filters(F_opts)
-    opts.pop('-F', None)
-
+        return 0 
+ 
+    # handle ``pygmentize -L`` 
+    L_opt = opts.pop('-L', None) 
+    if L_opt is not None: 
+        if opts: 
+            print(usage, file=sys.stderr) 
+            return 2 
+ 
+        # print version 
+        main(['', '-V']) 
+        if not args: 
+            args = ['lexer', 'formatter', 'filter', 'style'] 
+        for arg in args: 
+            _print_list(arg.rstrip('s')) 
+        return 0 
+ 
+    # handle ``pygmentize -H`` 
+    H_opt = opts.pop('-H', None) 
+    if H_opt is not None: 
+        if opts or len(args) != 2: 
+            print(usage, file=sys.stderr) 
+            return 2 
+ 
+        what, name = args  # pylint: disable=unbalanced-tuple-unpacking 
+        if what not in ('lexer', 'formatter', 'filter'): 
+            print(usage, file=sys.stderr) 
+            return 2 
+ 
+        return _print_help(what, name) 
+ 
+    # parse -O options 
+    parsed_opts = _parse_options(O_opts) 
+    opts.pop('-O', None) 
+ 
+    # parse -P options 
+    for p_opt in P_opts: 
+        try: 
+            name, value = p_opt.split('=', 1) 
+        except ValueError: 
+            parsed_opts[p_opt] = True 
+        else: 
+            parsed_opts[name] = value 
+    opts.pop('-P', None) 
+ 
+    # encodings 
+    inencoding = parsed_opts.get('inencoding', parsed_opts.get('encoding')) 
+    outencoding = parsed_opts.get('outencoding', parsed_opts.get('encoding')) 
+ 
+    # handle ``pygmentize -N`` 
+    infn = opts.pop('-N', None) 
+    if infn is not None: 
+        lexer = find_lexer_class_for_filename(infn) 
+        if lexer is None: 
+            lexer = TextLexer 
+ 
+        print(lexer.aliases[0]) 
+        return 0 
+ 
+    # handle ``pygmentize -S`` 
+    S_opt = opts.pop('-S', None) 
+    a_opt = opts.pop('-a', None) 
+    if S_opt is not None: 
+        f_opt = opts.pop('-f', None) 
+        if not f_opt: 
+            print(usage, file=sys.stderr) 
+            return 2 
+        if opts or args: 
+            print(usage, file=sys.stderr) 
+            return 2 
+ 
+        try: 
+            parsed_opts['style'] = S_opt 
+            fmter = get_formatter_by_name(f_opt, **parsed_opts) 
+        except ClassNotFound as err: 
+            print(err, file=sys.stderr) 
+            return 1 
+ 
+        print(fmter.get_style_defs(a_opt or '')) 
+        return 0 
+ 
+    # if no -S is given, -a is not allowed 
+    if a_opt is not None: 
+        print(usage, file=sys.stderr) 
+        return 2 
+ 
+    # parse -F options 
+    F_opts = _parse_filters(F_opts) 
+    opts.pop('-F', None) 
+ 
     allow_custom_lexer_formatter = False
     # -x: allow custom (eXternal) lexers and formatters
     if opts.pop('-x', None) is not None:
         allow_custom_lexer_formatter = True
 
-    # select lexer
-    lexer = None
-
-    # given by name?
-    lexername = opts.pop('-l', None)
-    if lexername:
+    # select lexer 
+    lexer = None 
+ 
+    # given by name? 
+    lexername = opts.pop('-l', None) 
+    if lexername: 
         # custom lexer, located relative to user's cwd
         if allow_custom_lexer_formatter and '.py' in lexername:
             try:
@@ -353,82 +353,82 @@ def main_inner(popts, args, usage):
             except (OptionError, ClassNotFound) as err:
                 print('Error:', err, file=sys.stderr)
                 return 1
-
-    # read input code
-    code = None
-
-    if args:
-        if len(args) > 1:
-            print(usage, file=sys.stderr)
-            return 2
-
-        if '-s' in opts:
-            print('Error: -s option not usable when input file specified',
-                  file=sys.stderr)
-            return 2
-
-        infn = args[0]
-        try:
-            with open(infn, 'rb') as infp:
-                code = infp.read()
-        except Exception as err:
-            print('Error: cannot read infile:', err, file=sys.stderr)
-            return 1
-        if not inencoding:
-            code, inencoding = guess_decode(code)
-
-        # do we have to guess the lexer?
-        if not lexer:
-            try:
-                lexer = get_lexer_for_filename(infn, code, **parsed_opts)
-            except ClassNotFound as err:
-                if '-g' in opts:
-                    try:
-                        lexer = guess_lexer(code, **parsed_opts)
-                    except ClassNotFound:
-                        lexer = TextLexer(**parsed_opts)
-                else:
-                    print('Error:', err, file=sys.stderr)
-                    return 1
-            except OptionError as err:
-                print('Error:', err, file=sys.stderr)
-                return 1
-
-    elif '-s' not in opts:  # treat stdin as full file (-s support is later)
-        # read code from terminal, always in binary mode since we want to
-        # decode ourselves and be tolerant with it
-        if sys.version_info > (3,):
-            # Python 3: we have to use .buffer to get a binary stream
-            code = sys.stdin.buffer.read()
-        else:
-            code = sys.stdin.read()
-        if not inencoding:
-            code, inencoding = guess_decode_from_terminal(code, sys.stdin)
-            # else the lexer will do the decoding
-        if not lexer:
-            try:
-                lexer = guess_lexer(code, **parsed_opts)
-            except ClassNotFound:
-                lexer = TextLexer(**parsed_opts)
-
-    else:  # -s option needs a lexer with -l
-        if not lexer:
-            print('Error: when using -s a lexer has to be selected with -l',
-                  file=sys.stderr)
-            return 2
-
-    # process filters
-    for fname, fopts in F_opts:
-        try:
-            lexer.add_filter(fname, **fopts)
-        except ClassNotFound as err:
-            print('Error:', err, file=sys.stderr)
-            return 1
-
-    # select formatter
-    outfn = opts.pop('-o', None)
-    fmter = opts.pop('-f', None)
-    if fmter:
+ 
+    # read input code 
+    code = None 
+ 
+    if args: 
+        if len(args) > 1: 
+            print(usage, file=sys.stderr) 
+            return 2 
+ 
+        if '-s' in opts: 
+            print('Error: -s option not usable when input file specified', 
+                  file=sys.stderr) 
+            return 2 
+ 
+        infn = args[0] 
+        try: 
+            with open(infn, 'rb') as infp: 
+                code = infp.read() 
+        except Exception as err: 
+            print('Error: cannot read infile:', err, file=sys.stderr) 
+            return 1 
+        if not inencoding: 
+            code, inencoding = guess_decode(code) 
+ 
+        # do we have to guess the lexer? 
+        if not lexer: 
+            try: 
+                lexer = get_lexer_for_filename(infn, code, **parsed_opts) 
+            except ClassNotFound as err: 
+                if '-g' in opts: 
+                    try: 
+                        lexer = guess_lexer(code, **parsed_opts) 
+                    except ClassNotFound: 
+                        lexer = TextLexer(**parsed_opts) 
+                else: 
+                    print('Error:', err, file=sys.stderr) 
+                    return 1 
+            except OptionError as err: 
+                print('Error:', err, file=sys.stderr) 
+                return 1 
+ 
+    elif '-s' not in opts:  # treat stdin as full file (-s support is later) 
+        # read code from terminal, always in binary mode since we want to 
+        # decode ourselves and be tolerant with it 
+        if sys.version_info > (3,): 
+            # Python 3: we have to use .buffer to get a binary stream 
+            code = sys.stdin.buffer.read() 
+        else: 
+            code = sys.stdin.read() 
+        if not inencoding: 
+            code, inencoding = guess_decode_from_terminal(code, sys.stdin) 
+            # else the lexer will do the decoding 
+        if not lexer: 
+            try: 
+                lexer = guess_lexer(code, **parsed_opts) 
+            except ClassNotFound: 
+                lexer = TextLexer(**parsed_opts) 
+ 
+    else:  # -s option needs a lexer with -l 
+        if not lexer: 
+            print('Error: when using -s a lexer has to be selected with -l', 
+                  file=sys.stderr) 
+            return 2 
+ 
+    # process filters 
+    for fname, fopts in F_opts: 
+        try: 
+            lexer.add_filter(fname, **fopts) 
+        except ClassNotFound as err: 
+            print('Error:', err, file=sys.stderr) 
+            return 1 
+ 
+    # select formatter 
+    outfn = opts.pop('-o', None) 
+    fmter = opts.pop('-f', None) 
+    if fmter: 
         # custom formatter, located relative to user's cwd
         if allow_custom_lexer_formatter and '.py' in fmter:
             try:
@@ -447,127 +447,127 @@ def main_inner(popts, args, usage):
             except (OptionError, ClassNotFound) as err:
                 print('Error:', err, file=sys.stderr)
                 return 1
-
-    if outfn:
-        if not fmter:
-            try:
-                fmter = get_formatter_for_filename(outfn, **parsed_opts)
-            except (OptionError, ClassNotFound) as err:
-                print('Error:', err, file=sys.stderr)
-                return 1
-        try:
-            outfile = open(outfn, 'wb')
-        except Exception as err:
-            print('Error: cannot open outfile:', err, file=sys.stderr)
-            return 1
-    else:
-        if not fmter:
+ 
+    if outfn: 
+        if not fmter: 
+            try: 
+                fmter = get_formatter_for_filename(outfn, **parsed_opts) 
+            except (OptionError, ClassNotFound) as err: 
+                print('Error:', err, file=sys.stderr) 
+                return 1 
+        try: 
+            outfile = open(outfn, 'wb') 
+        except Exception as err: 
+            print('Error: cannot open outfile:', err, file=sys.stderr) 
+            return 1 
+    else: 
+        if not fmter: 
             if '256' in os.environ.get('TERM', ''):
                 fmter = Terminal256Formatter(**parsed_opts)
             else:
                 fmter = TerminalFormatter(**parsed_opts)
-        if sys.version_info > (3,):
-            # Python 3: we have to use .buffer to get a binary stream
-            outfile = sys.stdout.buffer
-        else:
-            outfile = sys.stdout
-
-    # determine output encoding if not explicitly selected
-    if not outencoding:
-        if outfn:
-            # output file? use lexer encoding for now (can still be None)
-            fmter.encoding = inencoding
-        else:
-            # else use terminal encoding
-            fmter.encoding = terminal_encoding(sys.stdout)
-
-    # provide coloring under Windows, if possible
-    if not outfn and sys.platform in ('win32', 'cygwin') and \
-       fmter.name in ('Terminal', 'Terminal256'):  # pragma: no cover
-        # unfortunately colorama doesn't support binary streams on Py3
-        if sys.version_info > (3,):
-            from pygments.util import UnclosingTextIOWrapper
-            outfile = UnclosingTextIOWrapper(outfile, encoding=fmter.encoding)
-            fmter.encoding = None
-        try:
-            import colorama.initialise
-        except ImportError:
-            pass
-        else:
-            outfile = colorama.initialise.wrap_stream(
-                outfile, convert=None, strip=None, autoreset=False, wrap=True)
-
-    # When using the LaTeX formatter and the option `escapeinside` is
-    # specified, we need a special lexer which collects escaped text
-    # before running the chosen language lexer.
-    escapeinside = parsed_opts.get('escapeinside', '')
-    if len(escapeinside) == 2 and isinstance(fmter, LatexFormatter):
-        left = escapeinside[0]
-        right = escapeinside[1]
-        lexer = LatexEmbeddedLexer(left, right, lexer)
-
-    # ... and do it!
-    if '-s' not in opts:
-        # process whole input as per normal...
-        highlight(code, lexer, fmter, outfile)
-        return 0
-    else:
-        # line by line processing of stdin (eg: for 'tail -f')...
-        try:
-            while 1:
-                if sys.version_info > (3,):
-                    # Python 3: we have to use .buffer to get a binary stream
-                    line = sys.stdin.buffer.readline()
-                else:
-                    line = sys.stdin.readline()
-                if not line:
-                    break
-                if not inencoding:
-                    line = guess_decode_from_terminal(line, sys.stdin)[0]
-                highlight(line, lexer, fmter, outfile)
-                if hasattr(outfile, 'flush'):
-                    outfile.flush()
-            return 0
-        except KeyboardInterrupt:  # pragma: no cover
-            return 0
-
-
-def main(args=sys.argv):
-    """
-    Main command line entry point.
-    """
-    usage = USAGE % ((args[0],) * 6)
-
-    try:
+        if sys.version_info > (3,): 
+            # Python 3: we have to use .buffer to get a binary stream 
+            outfile = sys.stdout.buffer 
+        else: 
+            outfile = sys.stdout 
+ 
+    # determine output encoding if not explicitly selected 
+    if not outencoding: 
+        if outfn: 
+            # output file? use lexer encoding for now (can still be None) 
+            fmter.encoding = inencoding 
+        else: 
+            # else use terminal encoding 
+            fmter.encoding = terminal_encoding(sys.stdout) 
+ 
+    # provide coloring under Windows, if possible 
+    if not outfn and sys.platform in ('win32', 'cygwin') and \ 
+       fmter.name in ('Terminal', 'Terminal256'):  # pragma: no cover 
+        # unfortunately colorama doesn't support binary streams on Py3 
+        if sys.version_info > (3,): 
+            from pygments.util import UnclosingTextIOWrapper 
+            outfile = UnclosingTextIOWrapper(outfile, encoding=fmter.encoding) 
+            fmter.encoding = None 
+        try: 
+            import colorama.initialise 
+        except ImportError: 
+            pass 
+        else: 
+            outfile = colorama.initialise.wrap_stream( 
+                outfile, convert=None, strip=None, autoreset=False, wrap=True) 
+ 
+    # When using the LaTeX formatter and the option `escapeinside` is 
+    # specified, we need a special lexer which collects escaped text 
+    # before running the chosen language lexer. 
+    escapeinside = parsed_opts.get('escapeinside', '') 
+    if len(escapeinside) == 2 and isinstance(fmter, LatexFormatter): 
+        left = escapeinside[0] 
+        right = escapeinside[1] 
+        lexer = LatexEmbeddedLexer(left, right, lexer) 
+ 
+    # ... and do it! 
+    if '-s' not in opts: 
+        # process whole input as per normal... 
+        highlight(code, lexer, fmter, outfile) 
+        return 0 
+    else: 
+        # line by line processing of stdin (eg: for 'tail -f')... 
+        try: 
+            while 1: 
+                if sys.version_info > (3,): 
+                    # Python 3: we have to use .buffer to get a binary stream 
+                    line = sys.stdin.buffer.readline() 
+                else: 
+                    line = sys.stdin.readline() 
+                if not line: 
+                    break 
+                if not inencoding: 
+                    line = guess_decode_from_terminal(line, sys.stdin)[0] 
+                highlight(line, lexer, fmter, outfile) 
+                if hasattr(outfile, 'flush'): 
+                    outfile.flush() 
+            return 0 
+        except KeyboardInterrupt:  # pragma: no cover 
+            return 0 
+ 
+ 
+def main(args=sys.argv): 
+    """ 
+    Main command line entry point. 
+    """ 
+    usage = USAGE % ((args[0],) * 6) 
+ 
+    try: 
         popts, args = getopt.getopt(args[1:], "l:f:F:o:O:P:LS:a:N:vhVHgsx")
-    except getopt.GetoptError:
-        print(usage, file=sys.stderr)
-        return 2
-
-    try:
-        return main_inner(popts, args, usage)
-    except Exception:
-        if '-v' in dict(popts):
-            print(file=sys.stderr)
-            print('*' * 65, file=sys.stderr)
-            print('An unhandled exception occurred while highlighting.',
-                  file=sys.stderr)
-            print('Please report the whole traceback to the issue tracker at',
-                  file=sys.stderr)
+    except getopt.GetoptError: 
+        print(usage, file=sys.stderr) 
+        return 2 
+ 
+    try: 
+        return main_inner(popts, args, usage) 
+    except Exception: 
+        if '-v' in dict(popts): 
+            print(file=sys.stderr) 
+            print('*' * 65, file=sys.stderr) 
+            print('An unhandled exception occurred while highlighting.', 
+                  file=sys.stderr) 
+            print('Please report the whole traceback to the issue tracker at', 
+                  file=sys.stderr) 
             print('<https://github.com/pygments/pygments/issues>.',
-                  file=sys.stderr)
-            print('*' * 65, file=sys.stderr)
-            print(file=sys.stderr)
-            raise
-        import traceback
-        info = traceback.format_exception(*sys.exc_info())
-        msg = info[-1].strip()
-        if len(info) >= 3:
-            # extract relevant file and position info
-            msg += '\n   (f%s)' % info[-2].split('\n')[0].strip()[1:]
-        print(file=sys.stderr)
-        print('*** Error while highlighting:', file=sys.stderr)
-        print(msg, file=sys.stderr)
-        print('*** If this is a bug you want to report, please rerun with -v.',
-              file=sys.stderr)
-        return 1
+                  file=sys.stderr) 
+            print('*' * 65, file=sys.stderr) 
+            print(file=sys.stderr) 
+            raise 
+        import traceback 
+        info = traceback.format_exception(*sys.exc_info()) 
+        msg = info[-1].strip() 
+        if len(info) >= 3: 
+            # extract relevant file and position info 
+            msg += '\n   (f%s)' % info[-2].split('\n')[0].strip()[1:] 
+        print(file=sys.stderr) 
+        print('*** Error while highlighting:', file=sys.stderr) 
+        print(msg, file=sys.stderr) 
+        print('*** If this is a bug you want to report, please rerun with -v.', 
+              file=sys.stderr) 
+        return 1 

+ 57 - 57
contrib/python/Pygments/py2/pygments/console.py

@@ -1,71 +1,71 @@
-# -*- coding: utf-8 -*-
-"""
-    pygments.console
-    ~~~~~~~~~~~~~~~~
-
-    Format colored console output.
-
+# -*- coding: utf-8 -*- 
+""" 
+    pygments.console 
+    ~~~~~~~~~~~~~~~~ 
+ 
+    Format colored console output. 
+ 
     :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
-    :license: BSD, see LICENSE for details.
-"""
-
-esc = "\x1b["
-
-codes = {}
+    :license: BSD, see LICENSE for details. 
+""" 
+ 
+esc = "\x1b[" 
+ 
+codes = {} 
 codes[""] = ""
 codes["reset"] = esc + "39;49;00m"
-
+ 
 codes["bold"] = esc + "01m"
 codes["faint"] = esc + "02m"
 codes["standout"] = esc + "03m"
-codes["underline"] = esc + "04m"
+codes["underline"] = esc + "04m" 
 codes["blink"] = esc + "05m"
 codes["overline"] = esc + "06m"
-
+ 
 dark_colors = ["black", "red", "green", "yellow", "blue",
                "magenta", "cyan", "gray"]
 light_colors = ["brightblack", "brightred", "brightgreen", "brightyellow", "brightblue",
                 "brightmagenta", "brightcyan", "white"]
-
-x = 30
-for d, l in zip(dark_colors, light_colors):
-    codes[d] = esc + "%im" % x
+ 
+x = 30 
+for d, l in zip(dark_colors, light_colors): 
+    codes[d] = esc + "%im" % x 
     codes[l] = esc + "%im" % (60 + x)
-    x += 1
-
-del d, l, x
-
+    x += 1 
+ 
+del d, l, x 
+ 
 codes["white"] = codes["bold"]
-
-
-def reset_color():
-    return codes["reset"]
-
-
-def colorize(color_key, text):
-    return codes[color_key] + text + codes["reset"]
-
-
-def ansiformat(attr, text):
-    """
-    Format ``text`` with a color and/or some attributes::
-
-        color       normal color
-        *color*     bold color
-        _color_     underlined color
-        +color+     blinking color
-    """
-    result = []
-    if attr[:1] == attr[-1:] == '+':
-        result.append(codes['blink'])
-        attr = attr[1:-1]
-    if attr[:1] == attr[-1:] == '*':
-        result.append(codes['bold'])
-        attr = attr[1:-1]
-    if attr[:1] == attr[-1:] == '_':
-        result.append(codes['underline'])
-        attr = attr[1:-1]
-    result.append(codes[attr])
-    result.append(text)
-    result.append(codes['reset'])
-    return ''.join(result)
+ 
+ 
+def reset_color(): 
+    return codes["reset"] 
+ 
+ 
+def colorize(color_key, text): 
+    return codes[color_key] + text + codes["reset"] 
+ 
+ 
+def ansiformat(attr, text): 
+    """ 
+    Format ``text`` with a color and/or some attributes:: 
+ 
+        color       normal color 
+        *color*     bold color 
+        _color_     underlined color 
+        +color+     blinking color 
+    """ 
+    result = [] 
+    if attr[:1] == attr[-1:] == '+': 
+        result.append(codes['blink']) 
+        attr = attr[1:-1] 
+    if attr[:1] == attr[-1:] == '*': 
+        result.append(codes['bold']) 
+        attr = attr[1:-1] 
+    if attr[:1] == attr[-1:] == '_': 
+        result.append(codes['underline']) 
+        attr = attr[1:-1] 
+    result.append(codes[attr]) 
+    result.append(text) 
+    result.append(codes['reset']) 
+    return ''.join(result) 

+ 69 - 69
contrib/python/Pygments/py2/pygments/filter.py

@@ -1,74 +1,74 @@
-# -*- coding: utf-8 -*-
-"""
-    pygments.filter
-    ~~~~~~~~~~~~~~~
-
-    Module that implements the default filter.
-
+# -*- coding: utf-8 -*- 
+""" 
+    pygments.filter 
+    ~~~~~~~~~~~~~~~ 
+ 
+    Module that implements the default filter. 
+ 
     :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
-    :license: BSD, see LICENSE for details.
-"""
-
-
-def apply_filters(stream, filters, lexer=None):
-    """
-    Use this method to apply an iterable of filters to
-    a stream. If lexer is given it's forwarded to the
-    filter, otherwise the filter receives `None`.
-    """
-    def _apply(filter_, stream):
-        for token in filter_.filter(lexer, stream):
-            yield token
-    for filter_ in filters:
-        stream = _apply(filter_, stream)
-    return stream
-
-
-def simplefilter(f):
-    """
-    Decorator that converts a function into a filter::
-
-        @simplefilter
-        def lowercase(self, lexer, stream, options):
-            for ttype, value in stream:
-                yield ttype, value.lower()
-    """
-    return type(f.__name__, (FunctionFilter,), {
+    :license: BSD, see LICENSE for details. 
+""" 
+ 
+ 
+def apply_filters(stream, filters, lexer=None): 
+    """ 
+    Use this method to apply an iterable of filters to 
+    a stream. If lexer is given it's forwarded to the 
+    filter, otherwise the filter receives `None`. 
+    """ 
+    def _apply(filter_, stream): 
+        for token in filter_.filter(lexer, stream): 
+            yield token 
+    for filter_ in filters: 
+        stream = _apply(filter_, stream) 
+    return stream 
+ 
+ 
+def simplefilter(f): 
+    """ 
+    Decorator that converts a function into a filter:: 
+ 
+        @simplefilter 
+        def lowercase(self, lexer, stream, options): 
+            for ttype, value in stream: 
+                yield ttype, value.lower() 
+    """ 
+    return type(f.__name__, (FunctionFilter,), { 
         '__module__': getattr(f, '__module__'),
         '__doc__': f.__doc__,
         'function': f,
     })
-
-
-class Filter(object):
-    """
-    Default filter. Subclass this class or use the `simplefilter`
-    decorator to create own filters.
-    """
-
-    def __init__(self, **options):
-        self.options = options
-
-    def filter(self, lexer, stream):
-        raise NotImplementedError()
-
-
-class FunctionFilter(Filter):
-    """
-    Abstract class used by `simplefilter` to create simple
-    function filters on the fly. The `simplefilter` decorator
-    automatically creates subclasses of this class for
-    functions passed to it.
-    """
-    function = None
-
-    def __init__(self, **options):
-        if not hasattr(self, 'function'):
-            raise TypeError('%r used without bound function' %
-                            self.__class__.__name__)
-        Filter.__init__(self, **options)
-
-    def filter(self, lexer, stream):
-        # pylint: disable=not-callable
-        for ttype, value in self.function(lexer, stream, self.options):
-            yield ttype, value
+ 
+ 
+class Filter(object): 
+    """ 
+    Default filter. Subclass this class or use the `simplefilter` 
+    decorator to create own filters. 
+    """ 
+ 
+    def __init__(self, **options): 
+        self.options = options 
+ 
+    def filter(self, lexer, stream): 
+        raise NotImplementedError() 
+ 
+ 
+class FunctionFilter(Filter): 
+    """ 
+    Abstract class used by `simplefilter` to create simple 
+    function filters on the fly. The `simplefilter` decorator 
+    automatically creates subclasses of this class for 
+    functions passed to it. 
+    """ 
+    function = None 
+ 
+    def __init__(self, **options): 
+        if not hasattr(self, 'function'): 
+            raise TypeError('%r used without bound function' % 
+                            self.__class__.__name__) 
+        Filter.__init__(self, **options) 
+ 
+    def filter(self, lexer, stream): 
+        # pylint: disable=not-callable 
+        for ttype, value in self.function(lexer, stream, self.options): 
+            yield ttype, value 

+ 349 - 349
contrib/python/Pygments/py2/pygments/filters/__init__.py

@@ -1,350 +1,350 @@
-# -*- coding: utf-8 -*-
-"""
-    pygments.filters
-    ~~~~~~~~~~~~~~~~
-
-    Module containing filter lookup functions and default
-    filters.
-
+# -*- coding: utf-8 -*- 
+""" 
+    pygments.filters 
+    ~~~~~~~~~~~~~~~~ 
+ 
+    Module containing filter lookup functions and default 
+    filters. 
+ 
     :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
-    :license: BSD, see LICENSE for details.
-"""
-
-import re
-
-from pygments.token import String, Comment, Keyword, Name, Error, Whitespace, \
-    string_to_tokentype
-from pygments.filter import Filter
-from pygments.util import get_list_opt, get_int_opt, get_bool_opt, \
-     get_choice_opt, ClassNotFound, OptionError, text_type, string_types
-from pygments.plugin import find_plugin_filters
-
-
-def find_filter_class(filtername):
-    """Lookup a filter by name. Return None if not found."""
-    if filtername in FILTERS:
-        return FILTERS[filtername]
-    for name, cls in find_plugin_filters():
-        if name == filtername:
-            return cls
-    return None
-
-
-def get_filter_by_name(filtername, **options):
-    """Return an instantiated filter.
-
-    Options are passed to the filter initializer if wanted.
-    Raise a ClassNotFound if not found.
-    """
-    cls = find_filter_class(filtername)
-    if cls:
-        return cls(**options)
-    else:
-        raise ClassNotFound('filter %r not found' % filtername)
-
-
-def get_all_filters():
-    """Return a generator of all filter names."""
-    for name in FILTERS:
-        yield name
-    for name, _ in find_plugin_filters():
-        yield name
-
-
-def _replace_special(ttype, value, regex, specialttype,
-                     replacefunc=lambda x: x):
-    last = 0
-    for match in regex.finditer(value):
-        start, end = match.start(), match.end()
-        if start != last:
-            yield ttype, value[last:start]
-        yield specialttype, replacefunc(value[start:end])
-        last = end
-    if last != len(value):
-        yield ttype, value[last:]
-
-
-class CodeTagFilter(Filter):
-    """Highlight special code tags in comments and docstrings.
-
-    Options accepted:
-
-    `codetags` : list of strings
-       A list of strings that are flagged as code tags.  The default is to
-       highlight ``XXX``, ``TODO``, ``BUG`` and ``NOTE``.
-    """
-
-    def __init__(self, **options):
-        Filter.__init__(self, **options)
-        tags = get_list_opt(options, 'codetags',
-                            ['XXX', 'TODO', 'BUG', 'NOTE'])
-        self.tag_re = re.compile(r'\b(%s)\b' % '|'.join([
-            re.escape(tag) for tag in tags if tag
-        ]))
-
-    def filter(self, lexer, stream):
-        regex = self.tag_re
-        for ttype, value in stream:
-            if ttype in String.Doc or \
-               ttype in Comment and \
-               ttype not in Comment.Preproc:
-                for sttype, svalue in _replace_special(ttype, value, regex,
-                                                       Comment.Special):
-                    yield sttype, svalue
-            else:
-                yield ttype, value
-
-
-class KeywordCaseFilter(Filter):
-    """Convert keywords to lowercase or uppercase or capitalize them, which
-    means first letter uppercase, rest lowercase.
-
-    This can be useful e.g. if you highlight Pascal code and want to adapt the
-    code to your styleguide.
-
-    Options accepted:
-
-    `case` : string
-       The casing to convert keywords to. Must be one of ``'lower'``,
-       ``'upper'`` or ``'capitalize'``.  The default is ``'lower'``.
-    """
-
-    def __init__(self, **options):
-        Filter.__init__(self, **options)
-        case = get_choice_opt(options, 'case',
-                              ['lower', 'upper', 'capitalize'], 'lower')
-        self.convert = getattr(text_type, case)
-
-    def filter(self, lexer, stream):
-        for ttype, value in stream:
-            if ttype in Keyword:
-                yield ttype, self.convert(value)
-            else:
-                yield ttype, value
-
-
-class NameHighlightFilter(Filter):
-    """Highlight a normal Name (and Name.*) token with a different token type.
-
-    Example::
-
-        filter = NameHighlightFilter(
-            names=['foo', 'bar', 'baz'],
-            tokentype=Name.Function,
-        )
-
-    This would highlight the names "foo", "bar" and "baz"
-    as functions. `Name.Function` is the default token type.
-
-    Options accepted:
-
-    `names` : list of strings
-      A list of names that should be given the different token type.
-      There is no default.
-    `tokentype` : TokenType or string
-      A token type or a string containing a token type name that is
-      used for highlighting the strings in `names`.  The default is
-      `Name.Function`.
-    """
-
-    def __init__(self, **options):
-        Filter.__init__(self, **options)
-        self.names = set(get_list_opt(options, 'names', []))
-        tokentype = options.get('tokentype')
-        if tokentype:
-            self.tokentype = string_to_tokentype(tokentype)
-        else:
-            self.tokentype = Name.Function
-
-    def filter(self, lexer, stream):
-        for ttype, value in stream:
-            if ttype in Name and value in self.names:
-                yield self.tokentype, value
-            else:
-                yield ttype, value
-
-
-class ErrorToken(Exception):
-    pass
-
-
-class RaiseOnErrorTokenFilter(Filter):
-    """Raise an exception when the lexer generates an error token.
-
-    Options accepted:
-
-    `excclass` : Exception class
-      The exception class to raise.
-      The default is `pygments.filters.ErrorToken`.
-
-    .. versionadded:: 0.8
-    """
-
-    def __init__(self, **options):
-        Filter.__init__(self, **options)
-        self.exception = options.get('excclass', ErrorToken)
-        try:
-            # issubclass() will raise TypeError if first argument is not a class
-            if not issubclass(self.exception, Exception):
-                raise TypeError
-        except TypeError:
-            raise OptionError('excclass option is not an exception class')
-
-    def filter(self, lexer, stream):
-        for ttype, value in stream:
-            if ttype is Error:
-                raise self.exception(value)
-            yield ttype, value
-
-
-class VisibleWhitespaceFilter(Filter):
-    """Convert tabs, newlines and/or spaces to visible characters.
-
-    Options accepted:
-
-    `spaces` : string or bool
-      If this is a one-character string, spaces will be replaces by this string.
-      If it is another true value, spaces will be replaced by ``·`` (unicode
-      MIDDLE DOT).  If it is a false value, spaces will not be replaced.  The
-      default is ``False``.
-    `tabs` : string or bool
-      The same as for `spaces`, but the default replacement character is ``»``
-      (unicode RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK).  The default value
-      is ``False``.  Note: this will not work if the `tabsize` option for the
-      lexer is nonzero, as tabs will already have been expanded then.
-    `tabsize` : int
-      If tabs are to be replaced by this filter (see the `tabs` option), this
-      is the total number of characters that a tab should be expanded to.
-      The default is ``8``.
-    `newlines` : string or bool
-      The same as for `spaces`, but the default replacement character is ``¶``
-      (unicode PILCROW SIGN).  The default value is ``False``.
-    `wstokentype` : bool
-      If true, give whitespace the special `Whitespace` token type.  This allows
-      styling the visible whitespace differently (e.g. greyed out), but it can
-      disrupt background colors.  The default is ``True``.
-
-    .. versionadded:: 0.8
-    """
-
-    def __init__(self, **options):
-        Filter.__init__(self, **options)
-        for name, default in [('spaces',   u'·'),
-                              ('tabs',     u'»'),
-                              ('newlines', u'¶')]:
-            opt = options.get(name, False)
-            if isinstance(opt, string_types) and len(opt) == 1:
-                setattr(self, name, opt)
-            else:
-                setattr(self, name, (opt and default or ''))
-        tabsize = get_int_opt(options, 'tabsize', 8)
-        if self.tabs:
-            self.tabs += ' ' * (tabsize - 1)
-        if self.newlines:
-            self.newlines += '\n'
-        self.wstt = get_bool_opt(options, 'wstokentype', True)
-
-    def filter(self, lexer, stream):
-        if self.wstt:
-            spaces = self.spaces or u' '
-            tabs = self.tabs or u'\t'
-            newlines = self.newlines or u'\n'
-            regex = re.compile(r'\s')
-            def replacefunc(wschar):
-                if wschar == ' ':
-                    return spaces
-                elif wschar == '\t':
-                    return tabs
-                elif wschar == '\n':
-                    return newlines
-                return wschar
-
-            for ttype, value in stream:
-                for sttype, svalue in _replace_special(ttype, value, regex,
-                                                       Whitespace, replacefunc):
-                    yield sttype, svalue
-        else:
-            spaces, tabs, newlines = self.spaces, self.tabs, self.newlines
-            # simpler processing
-            for ttype, value in stream:
-                if spaces:
-                    value = value.replace(' ', spaces)
-                if tabs:
-                    value = value.replace('\t', tabs)
-                if newlines:
-                    value = value.replace('\n', newlines)
-                yield ttype, value
-
-
-class GobbleFilter(Filter):
-    """Gobbles source code lines (eats initial characters).
-
-    This filter drops the first ``n`` characters off every line of code.  This
-    may be useful when the source code fed to the lexer is indented by a fixed
-    amount of space that isn't desired in the output.
-
-    Options accepted:
-
-    `n` : int
-       The number of characters to gobble.
-
-    .. versionadded:: 1.2
-    """
-    def __init__(self, **options):
-        Filter.__init__(self, **options)
-        self.n = get_int_opt(options, 'n', 0)
-
-    def gobble(self, value, left):
-        if left < len(value):
-            return value[left:], 0
-        else:
-            return u'', left - len(value)
-
-    def filter(self, lexer, stream):
-        n = self.n
-        left = n # How many characters left to gobble.
-        for ttype, value in stream:
-            # Remove ``left`` tokens from first line, ``n`` from all others.
-            parts = value.split('\n')
-            (parts[0], left) = self.gobble(parts[0], left)
-            for i in range(1, len(parts)):
-                (parts[i], left) = self.gobble(parts[i], n)
-            value = u'\n'.join(parts)
-
-            if value != '':
-                yield ttype, value
-
-
-class TokenMergeFilter(Filter):
-    """Merges consecutive tokens with the same token type in the output
-    stream of a lexer.
-
-    .. versionadded:: 1.2
-    """
-    def __init__(self, **options):
-        Filter.__init__(self, **options)
-
-    def filter(self, lexer, stream):
-        current_type = None
-        current_value = None
-        for ttype, value in stream:
-            if ttype is current_type:
-                current_value += value
-            else:
-                if current_type is not None:
-                    yield current_type, current_value
-                current_type = ttype
-                current_value = value
-        if current_type is not None:
-            yield current_type, current_value
-
-
-FILTERS = {
-    'codetagify':     CodeTagFilter,
-    'keywordcase':    KeywordCaseFilter,
-    'highlight':      NameHighlightFilter,
-    'raiseonerror':   RaiseOnErrorTokenFilter,
-    'whitespace':     VisibleWhitespaceFilter,
-    'gobble':         GobbleFilter,
-    'tokenmerge':     TokenMergeFilter,
-}
+    :license: BSD, see LICENSE for details. 
+""" 
+ 
+import re 
+ 
+from pygments.token import String, Comment, Keyword, Name, Error, Whitespace, \ 
+    string_to_tokentype 
+from pygments.filter import Filter 
+from pygments.util import get_list_opt, get_int_opt, get_bool_opt, \ 
+     get_choice_opt, ClassNotFound, OptionError, text_type, string_types 
+from pygments.plugin import find_plugin_filters 
+ 
+ 
+def find_filter_class(filtername): 
+    """Lookup a filter by name. Return None if not found.""" 
+    if filtername in FILTERS: 
+        return FILTERS[filtername] 
+    for name, cls in find_plugin_filters(): 
+        if name == filtername: 
+            return cls 
+    return None 
+ 
+ 
+def get_filter_by_name(filtername, **options): 
+    """Return an instantiated filter. 
+ 
+    Options are passed to the filter initializer if wanted. 
+    Raise a ClassNotFound if not found. 
+    """ 
+    cls = find_filter_class(filtername) 
+    if cls: 
+        return cls(**options) 
+    else: 
+        raise ClassNotFound('filter %r not found' % filtername) 
+ 
+ 
+def get_all_filters(): 
+    """Return a generator of all filter names.""" 
+    for name in FILTERS: 
+        yield name 
+    for name, _ in find_plugin_filters(): 
+        yield name 
+ 
+ 
+def _replace_special(ttype, value, regex, specialttype, 
+                     replacefunc=lambda x: x): 
+    last = 0 
+    for match in regex.finditer(value): 
+        start, end = match.start(), match.end() 
+        if start != last: 
+            yield ttype, value[last:start] 
+        yield specialttype, replacefunc(value[start:end]) 
+        last = end 
+    if last != len(value): 
+        yield ttype, value[last:] 
+ 
+ 
+class CodeTagFilter(Filter): 
+    """Highlight special code tags in comments and docstrings. 
+ 
+    Options accepted: 
+ 
+    `codetags` : list of strings 
+       A list of strings that are flagged as code tags.  The default is to 
+       highlight ``XXX``, ``TODO``, ``BUG`` and ``NOTE``. 
+    """ 
+ 
+    def __init__(self, **options): 
+        Filter.__init__(self, **options) 
+        tags = get_list_opt(options, 'codetags', 
+                            ['XXX', 'TODO', 'BUG', 'NOTE']) 
+        self.tag_re = re.compile(r'\b(%s)\b' % '|'.join([ 
+            re.escape(tag) for tag in tags if tag 
+        ])) 
+ 
+    def filter(self, lexer, stream): 
+        regex = self.tag_re 
+        for ttype, value in stream: 
+            if ttype in String.Doc or \ 
+               ttype in Comment and \ 
+               ttype not in Comment.Preproc: 
+                for sttype, svalue in _replace_special(ttype, value, regex, 
+                                                       Comment.Special): 
+                    yield sttype, svalue 
+            else: 
+                yield ttype, value 
+ 
+ 
+class KeywordCaseFilter(Filter): 
+    """Convert keywords to lowercase or uppercase or capitalize them, which 
+    means first letter uppercase, rest lowercase. 
+ 
+    This can be useful e.g. if you highlight Pascal code and want to adapt the 
+    code to your styleguide. 
+ 
+    Options accepted: 
+ 
+    `case` : string 
+       The casing to convert keywords to. Must be one of ``'lower'``, 
+       ``'upper'`` or ``'capitalize'``.  The default is ``'lower'``. 
+    """ 
+ 
+    def __init__(self, **options): 
+        Filter.__init__(self, **options) 
+        case = get_choice_opt(options, 'case', 
+                              ['lower', 'upper', 'capitalize'], 'lower') 
+        self.convert = getattr(text_type, case) 
+ 
+    def filter(self, lexer, stream): 
+        for ttype, value in stream: 
+            if ttype in Keyword: 
+                yield ttype, self.convert(value) 
+            else: 
+                yield ttype, value 
+ 
+ 
+class NameHighlightFilter(Filter): 
+    """Highlight a normal Name (and Name.*) token with a different token type. 
+ 
+    Example:: 
+ 
+        filter = NameHighlightFilter( 
+            names=['foo', 'bar', 'baz'], 
+            tokentype=Name.Function, 
+        ) 
+ 
+    This would highlight the names "foo", "bar" and "baz" 
+    as functions. `Name.Function` is the default token type. 
+ 
+    Options accepted: 
+ 
+    `names` : list of strings 
+      A list of names that should be given the different token type. 
+      There is no default. 
+    `tokentype` : TokenType or string 
+      A token type or a string containing a token type name that is 
+      used for highlighting the strings in `names`.  The default is 
+      `Name.Function`. 
+    """ 
+ 
+    def __init__(self, **options): 
+        Filter.__init__(self, **options) 
+        self.names = set(get_list_opt(options, 'names', [])) 
+        tokentype = options.get('tokentype') 
+        if tokentype: 
+            self.tokentype = string_to_tokentype(tokentype) 
+        else: 
+            self.tokentype = Name.Function 
+ 
+    def filter(self, lexer, stream): 
+        for ttype, value in stream: 
+            if ttype in Name and value in self.names: 
+                yield self.tokentype, value 
+            else: 
+                yield ttype, value 
+ 
+ 
+class ErrorToken(Exception): 
+    pass 
+ 
+ 
+class RaiseOnErrorTokenFilter(Filter): 
+    """Raise an exception when the lexer generates an error token. 
+ 
+    Options accepted: 
+ 
+    `excclass` : Exception class 
+      The exception class to raise. 
+      The default is `pygments.filters.ErrorToken`. 
+ 
+    .. versionadded:: 0.8 
+    """ 
+ 
+    def __init__(self, **options): 
+        Filter.__init__(self, **options) 
+        self.exception = options.get('excclass', ErrorToken) 
+        try: 
+            # issubclass() will raise TypeError if first argument is not a class 
+            if not issubclass(self.exception, Exception): 
+                raise TypeError 
+        except TypeError: 
+            raise OptionError('excclass option is not an exception class') 
+ 
+    def filter(self, lexer, stream): 
+        for ttype, value in stream: 
+            if ttype is Error: 
+                raise self.exception(value) 
+            yield ttype, value 
+ 
+ 
+class VisibleWhitespaceFilter(Filter): 
+    """Convert tabs, newlines and/or spaces to visible characters. 
+ 
+    Options accepted: 
+ 
+    `spaces` : string or bool 
+      If this is a one-character string, spaces will be replaces by this string. 
+      If it is another true value, spaces will be replaced by ``·`` (unicode 
+      MIDDLE DOT).  If it is a false value, spaces will not be replaced.  The 
+      default is ``False``. 
+    `tabs` : string or bool 
+      The same as for `spaces`, but the default replacement character is ``»`` 
+      (unicode RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK).  The default value 
+      is ``False``.  Note: this will not work if the `tabsize` option for the 
+      lexer is nonzero, as tabs will already have been expanded then. 
+    `tabsize` : int 
+      If tabs are to be replaced by this filter (see the `tabs` option), this 
+      is the total number of characters that a tab should be expanded to. 
+      The default is ``8``. 
+    `newlines` : string or bool 
+      The same as for `spaces`, but the default replacement character is ``¶`` 
+      (unicode PILCROW SIGN).  The default value is ``False``. 
+    `wstokentype` : bool 
+      If true, give whitespace the special `Whitespace` token type.  This allows 
+      styling the visible whitespace differently (e.g. greyed out), but it can 
+      disrupt background colors.  The default is ``True``. 
+ 
+    .. versionadded:: 0.8 
+    """ 
+ 
+    def __init__(self, **options): 
+        Filter.__init__(self, **options) 
+        for name, default in [('spaces',   u'·'), 
+                              ('tabs',     u'»'), 
+                              ('newlines', u'¶')]: 
+            opt = options.get(name, False) 
+            if isinstance(opt, string_types) and len(opt) == 1: 
+                setattr(self, name, opt) 
+            else: 
+                setattr(self, name, (opt and default or '')) 
+        tabsize = get_int_opt(options, 'tabsize', 8) 
+        if self.tabs: 
+            self.tabs += ' ' * (tabsize - 1) 
+        if self.newlines: 
+            self.newlines += '\n' 
+        self.wstt = get_bool_opt(options, 'wstokentype', True) 
+ 
+    def filter(self, lexer, stream): 
+        if self.wstt: 
+            spaces = self.spaces or u' ' 
+            tabs = self.tabs or u'\t' 
+            newlines = self.newlines or u'\n' 
+            regex = re.compile(r'\s') 
+            def replacefunc(wschar): 
+                if wschar == ' ': 
+                    return spaces 
+                elif wschar == '\t': 
+                    return tabs 
+                elif wschar == '\n': 
+                    return newlines 
+                return wschar 
+ 
+            for ttype, value in stream: 
+                for sttype, svalue in _replace_special(ttype, value, regex, 
+                                                       Whitespace, replacefunc): 
+                    yield sttype, svalue 
+        else: 
+            spaces, tabs, newlines = self.spaces, self.tabs, self.newlines 
+            # simpler processing 
+            for ttype, value in stream: 
+                if spaces: 
+                    value = value.replace(' ', spaces) 
+                if tabs: 
+                    value = value.replace('\t', tabs) 
+                if newlines: 
+                    value = value.replace('\n', newlines) 
+                yield ttype, value 
+ 
+ 
+class GobbleFilter(Filter): 
+    """Gobbles source code lines (eats initial characters). 
+ 
+    This filter drops the first ``n`` characters off every line of code.  This 
+    may be useful when the source code fed to the lexer is indented by a fixed 
+    amount of space that isn't desired in the output. 
+ 
+    Options accepted: 
+ 
+    `n` : int 
+       The number of characters to gobble. 
+ 
+    .. versionadded:: 1.2 
+    """ 
+    def __init__(self, **options): 
+        Filter.__init__(self, **options) 
+        self.n = get_int_opt(options, 'n', 0) 
+ 
+    def gobble(self, value, left): 
+        if left < len(value): 
+            return value[left:], 0 
+        else: 
+            return u'', left - len(value) 
+ 
+    def filter(self, lexer, stream): 
+        n = self.n 
+        left = n # How many characters left to gobble. 
+        for ttype, value in stream: 
+            # Remove ``left`` tokens from first line, ``n`` from all others. 
+            parts = value.split('\n') 
+            (parts[0], left) = self.gobble(parts[0], left) 
+            for i in range(1, len(parts)): 
+                (parts[i], left) = self.gobble(parts[i], n) 
+            value = u'\n'.join(parts) 
+ 
+            if value != '': 
+                yield ttype, value 
+ 
+ 
+class TokenMergeFilter(Filter): 
+    """Merges consecutive tokens with the same token type in the output 
+    stream of a lexer. 
+ 
+    .. versionadded:: 1.2 
+    """ 
+    def __init__(self, **options): 
+        Filter.__init__(self, **options) 
+ 
+    def filter(self, lexer, stream): 
+        current_type = None 
+        current_value = None 
+        for ttype, value in stream: 
+            if ttype is current_type: 
+                current_value += value 
+            else: 
+                if current_type is not None: 
+                    yield current_type, current_value 
+                current_type = ttype 
+                current_value = value 
+        if current_type is not None: 
+            yield current_type, current_value 
+ 
+ 
+FILTERS = { 
+    'codetagify':     CodeTagFilter, 
+    'keywordcase':    KeywordCaseFilter, 
+    'highlight':      NameHighlightFilter, 
+    'raiseonerror':   RaiseOnErrorTokenFilter, 
+    'whitespace':     VisibleWhitespaceFilter, 
+    'gobble':         GobbleFilter, 
+    'tokenmerge':     TokenMergeFilter, 
+} 

Some files were not shown because too many files changed in this diff