Browse Source

Restoring authorship annotation for <selivni@yandex-team.ru>. Commit 1 of 2.

selivni 3 years ago
parent
commit
fd7c757f12

+ 4 - 4
build/conf/project_specific/maps/mapkit.conf

@@ -218,10 +218,10 @@ when ($MAPSMOBI_BUILD_TARGET && $OS_ANDROID) {
         CFLAGS+=-fvisibility=default
     }
 
-    when ($NO_DEBUGINFO != "yes" && $REDUCED_DEBUGINFO) {
-        CFLAGS+=-g1
-    }
-
+    when ($NO_DEBUGINFO != "yes" && $REDUCED_DEBUGINFO) { 
+        CFLAGS+=-g1 
+    } 
+ 
     CFLAGS+=-DANDROID -fpic -ffunction-sections -funwind-tables -fstack-protector -no-canonical-prefixes -Wa,--noexecstack
     CFLAGS+=-Qunused-arguments -Wno-unused-function -Wno-pessimizing-move -Wno-missing-field-initializers -Wno-missing-braces -Wno-unused-parameter -Wno-vexing-parse -Wno-sign-compare -Wno-deprecated-declarations
 

+ 126 - 126
build/scripts/gen_aar_gradle_script.py

@@ -6,16 +6,16 @@ FLAT_DIRS_REPO_TEMPLATE='flatDir {{ dirs {dirs} }}\n'
 MAVEN_REPO_TEMPLATE='maven {{ url "{repo}" }}\n'
 KEYSTORE_TEMLATE='signingConfigs {{ debug {{ storeFile file("{keystore}") }} }}\n'
 
-ENABLE_JAVADOC = 'tasks["bundle${suffix}Aar"].dependsOn packageJavadocTask'
-DO_NOT_STRIP = '''\
-    packagingOptions {
-        doNotStrip "*/arm64-v8a/*.so"
-        doNotStrip "*/armeabi-v7a/*.so"
-        doNotStrip "*/x86_64/*.so"
-        doNotStrip "*/x86/*.so"
-    }
-'''
-
+ENABLE_JAVADOC = 'tasks["bundle${suffix}Aar"].dependsOn packageJavadocTask' 
+DO_NOT_STRIP = '''\ 
+    packagingOptions { 
+        doNotStrip "*/arm64-v8a/*.so" 
+        doNotStrip "*/armeabi-v7a/*.so" 
+        doNotStrip "*/x86_64/*.so" 
+        doNotStrip "*/x86/*.so" 
+    } 
+''' 
+ 
 AAR_TEMPLATE = """\
 ext.jniLibsDirs = [
     {jni_libs_dirs}
@@ -55,13 +55,13 @@ def targetVersion = 30
 def buildVersion = '30.0.3'
 
 import com.android.build.gradle.LibraryPlugin
-import java.nio.file.Files
-import java.nio.file.Paths
+import java.nio.file.Files 
+import java.nio.file.Paths 
 import java.util.regex.Matcher
 import java.util.regex.Pattern
-import java.util.zip.ZipFile
-
+import java.util.zip.ZipFile 
 
+ 
 apply plugin: 'com.github.dcendents.android-maven'
 
 buildDir = "$projectDir/build"
@@ -128,8 +128,8 @@ android {{
         androidTest.setRoot('bundle/tests')
     }}
 
-    {do_not_strip}
-
+    {do_not_strip} 
+ 
     dependencies {{
         for (bundle in bundles)
             compile("$bundle") {{
@@ -181,90 +181,90 @@ android {{
         tasks["bundle${{suffix}}Aar"].dependsOn sourcesJarTask
         tasks["bundle${{suffix}}Aar"].dependsOn writePomTask
     }}
-
-    android.libraryVariants.all {{ variant ->
-        def capitalizedVariantName = variant.name.capitalize()
-        def suffix = variant.buildType.name.capitalize()
-
-        def javadocTask = project.tasks.create(name: "generate${{capitalizedVariantName}}Javadoc", type: Javadoc) {{
-            group = "Javadoc"
-            description "Generates Javadoc for $capitalizedVariantName"
-
-            title = "Yandex documentation"
-
-            source = android.sourceSets.main.java.srcDirs
-            include "**/*/yandex/*/**"
-            // TODO: remove this when we support internal doc exclusion in IDL
-            // https://st.yandex-team.ru/MAPSMOBCORE-11364
-            exclude "**/internal/**"
-
-            ext.androidJar = "${{android.sdkDirectory.path}}/platforms/${{android.compileSdkVersion}}/android.jar"
-            classpath =
-                files(android.getBootClasspath().join(File.pathSeparator)) +
-                configurations.compile +
-                files(ext.androidJar) +
-                files(variant.javaCompile.outputs.files)
-
-            destinationDir = file("$buildDir/${{rootProject.name}}-javadoc/$capitalizedVariantName/")
-
-            options.doclet("ExcludeDoclet")
-            options.docletpath(
-                files(repositories.maven.url).getAsFileTree()
-                    .matching{{include "**/exclude-doclet-1.0.0.jar"}}
-                        .getSingleFile())
-
-            options.charSet = "UTF-8"
-            options.encoding = "UTF-8"
-
-            failOnError false
-
-            afterEvaluate {{
-                def dependencyTree = project.configurations.compile.getAsFileTree()
-                def aar_set = dependencyTree.matching{{include "**/*.aar"}}.getFiles()
-                def jar_tree = dependencyTree.matching{{include "**/*.jar"}}
-
-                aar_set.each{{ aar ->
-                    def outputPath = "$buildDir/tmp/aarJar/${{aar.name.replace('.aar', '.jar')}}"
-                    classpath += files(outputPath)
-
-                    dependsOn task(name: "extract_${{aar.getAbsolutePath().replace(File.separatorChar, '_' as char)}}-${{capitalizedVariantName}}").doLast {{
-                        extractClassesJar(aar, outputPath)
-                    }}
-                }}
-            }}
-        }}
-
-        def packageJavadocTask = project.tasks.create(name: "package${{capitalizedVariantName}}Javadoc", type: Tar) {{
-            description "Makes an archive from Javadoc output"
-            from "${{buildDir}}/${{rootProject.name}}-javadoc/$capitalizedVariantName/"
-            archiveFileName = "${{rootProject.name}}-javadoc.tar.gz"
-            destinationDirectory = new File("${{buildDir}}")
-            dependsOn javadocTask
-        }}
-
-        {enable_javadoc}
-    }}
-
-}}
-
-private def extractClassesJar(aarPath, outputPath) {{
-    if (!aarPath.exists()) {{
-        throw new GradleException("AAR $aarPath not found")
-    }}
-
-    def zip = new ZipFile(aarPath)
-    zip.entries().each {{
-        if (it.name == "classes.jar") {{
-            def path = Paths.get(outputPath)
-            if (!Files.exists(path)) {{
-                Files.createDirectories(path.getParent())
-                Files.copy(zip.getInputStream(it), path)
-            }}
-        }}
-    }}
-    zip.close()
+ 
+    android.libraryVariants.all {{ variant -> 
+        def capitalizedVariantName = variant.name.capitalize() 
+        def suffix = variant.buildType.name.capitalize() 
+ 
+        def javadocTask = project.tasks.create(name: "generate${{capitalizedVariantName}}Javadoc", type: Javadoc) {{ 
+            group = "Javadoc" 
+            description "Generates Javadoc for $capitalizedVariantName" 
+ 
+            title = "Yandex documentation" 
+ 
+            source = android.sourceSets.main.java.srcDirs 
+            include "**/*/yandex/*/**" 
+            // TODO: remove this when we support internal doc exclusion in IDL 
+            // https://st.yandex-team.ru/MAPSMOBCORE-11364 
+            exclude "**/internal/**" 
+ 
+            ext.androidJar = "${{android.sdkDirectory.path}}/platforms/${{android.compileSdkVersion}}/android.jar" 
+            classpath = 
+                files(android.getBootClasspath().join(File.pathSeparator)) + 
+                configurations.compile + 
+                files(ext.androidJar) + 
+                files(variant.javaCompile.outputs.files) 
+ 
+            destinationDir = file("$buildDir/${{rootProject.name}}-javadoc/$capitalizedVariantName/") 
+ 
+            options.doclet("ExcludeDoclet") 
+            options.docletpath( 
+                files(repositories.maven.url).getAsFileTree() 
+                    .matching{{include "**/exclude-doclet-1.0.0.jar"}} 
+                        .getSingleFile()) 
+ 
+            options.charSet = "UTF-8" 
+            options.encoding = "UTF-8" 
+ 
+            failOnError false 
+ 
+            afterEvaluate {{ 
+                def dependencyTree = project.configurations.compile.getAsFileTree() 
+                def aar_set = dependencyTree.matching{{include "**/*.aar"}}.getFiles() 
+                def jar_tree = dependencyTree.matching{{include "**/*.jar"}} 
+ 
+                aar_set.each{{ aar -> 
+                    def outputPath = "$buildDir/tmp/aarJar/${{aar.name.replace('.aar', '.jar')}}" 
+                    classpath += files(outputPath) 
+ 
+                    dependsOn task(name: "extract_${{aar.getAbsolutePath().replace(File.separatorChar, '_' as char)}}-${{capitalizedVariantName}}").doLast {{ 
+                        extractClassesJar(aar, outputPath) 
+                    }} 
+                }} 
+            }} 
+        }} 
+ 
+        def packageJavadocTask = project.tasks.create(name: "package${{capitalizedVariantName}}Javadoc", type: Tar) {{ 
+            description "Makes an archive from Javadoc output" 
+            from "${{buildDir}}/${{rootProject.name}}-javadoc/$capitalizedVariantName/" 
+            archiveFileName = "${{rootProject.name}}-javadoc.tar.gz" 
+            destinationDirectory = new File("${{buildDir}}") 
+            dependsOn javadocTask 
+        }} 
+ 
+        {enable_javadoc} 
+    }} 
+ 
 }}
-
+ 
+private def extractClassesJar(aarPath, outputPath) {{ 
+    if (!aarPath.exists()) {{ 
+        throw new GradleException("AAR $aarPath not found") 
+    }} 
+ 
+    def zip = new ZipFile(aarPath) 
+    zip.entries().each {{ 
+        if (it.name == "classes.jar") {{ 
+            def path = Paths.get(outputPath) 
+            if (!Files.exists(path)) {{ 
+                Files.createDirectories(path.getParent()) 
+                Files.copy(zip.getInputStream(it), path) 
+            }} 
+        }} 
+    }} 
+    zip.close() 
+}} 
+ 
 """
 
 
@@ -295,53 +295,53 @@ def gen_build_script(args):
     else:
         keystore = ''
 
-    if args.generate_doc:
-        enable_javadoc = ENABLE_JAVADOC
-    else:
-        enable_javadoc = ''
-
-    if args.do_not_strip:
-        do_not_strip = DO_NOT_STRIP
-    else:
-        do_not_strip = ''
-
+    if args.generate_doc: 
+        enable_javadoc = ENABLE_JAVADOC 
+    else: 
+        enable_javadoc = '' 
+ 
+    if args.do_not_strip: 
+        do_not_strip = DO_NOT_STRIP 
+    else: 
+        do_not_strip = '' 
+ 
     return AAR_TEMPLATE.format(
-        aars=wrap(args.aars),
+        aars=wrap(args.aars), 
         compile_only_aars=wrap(args.compile_only_aars),
-        aidl_dirs=wrap(args.aidl_dirs),
+        aidl_dirs=wrap(args.aidl_dirs), 
         assets_dirs=wrap(args.assets_dirs),
-        bundles=wrap(bundles),
-        do_not_strip=do_not_strip,
-        enable_javadoc=enable_javadoc,
-        flat_dirs_repo=flat_dirs_repo,
+        bundles=wrap(bundles), 
+        do_not_strip=do_not_strip, 
+        enable_javadoc=enable_javadoc, 
+        flat_dirs_repo=flat_dirs_repo, 
         java_dirs=wrap(args.java_dirs),
-        jni_libs_dirs=wrap(args.jni_libs_dirs),
-        keystore=keystore,
+        jni_libs_dirs=wrap(args.jni_libs_dirs), 
+        keystore=keystore, 
         manifest=args.manifest,
         maven_repos=maven_repos,
-        proguard_rules=args.proguard_rules,
-        res_dirs=wrap(args.res_dirs),
+        proguard_rules=args.proguard_rules, 
+        res_dirs=wrap(args.res_dirs), 
     )
 
 
 if __name__ == '__main__':
     parser = argparse.ArgumentParser()
-    parser.add_argument('--aars', nargs='*', default=[])
+    parser.add_argument('--aars', nargs='*', default=[]) 
     parser.add_argument('--compile-only-aars', nargs='*', default=[])
     parser.add_argument('--aidl-dirs', nargs='*', default=[])
     parser.add_argument('--assets-dirs', nargs='*', default=[])
-    parser.add_argument('--bundle-name', nargs='?', default='default-bundle-name')
+    parser.add_argument('--bundle-name', nargs='?', default='default-bundle-name') 
     parser.add_argument('--bundles', nargs='*', default=[])
-    parser.add_argument('--do-not-strip', action='store_true')
-    parser.add_argument('--flat-repos', nargs='*', default=[])
-    parser.add_argument('--generate-doc', action='store_true')
+    parser.add_argument('--do-not-strip', action='store_true') 
+    parser.add_argument('--flat-repos', nargs='*', default=[]) 
+    parser.add_argument('--generate-doc', action='store_true') 
     parser.add_argument('--java-dirs', nargs='*', default=[])
     parser.add_argument('--jni-libs-dirs', nargs='*', default=[])
-    parser.add_argument('--keystore', default=None)
+    parser.add_argument('--keystore', default=None) 
     parser.add_argument('--manifest', required=True)
     parser.add_argument('--maven-repos', nargs='*', default=[])
     parser.add_argument('--output-dir', required=True)
-    parser.add_argument('--peers', nargs='*', default=[])
+    parser.add_argument('--peers', nargs='*', default=[]) 
     parser.add_argument('--proguard-rules', nargs='?', default=None)
     parser.add_argument('--res-dirs', nargs='*', default=[])
     args = parser.parse_args()

+ 214 - 214
contrib/libs/curl/lib/asyn-thread.c

@@ -84,60 +84,60 @@ struct resdata {
   struct curltime start;
 };
 
-/* Doubly linked list of orphaned thread handles. */
-struct thread_list {
-  curl_thread_t handle;
-
-  /* 'exiting' is set true right before an orphaned thread exits.
-     it should only be set by the orphaned thread from
-     signal_orphan_is_exiting(). */
-  bool exiting;
-
-  struct thread_list *prev, *next;
-};
-
-/* Orphaned threads: A global list of resolver threads that could not be
- * completed in time and so they were abandoned by their parent. The list is
- * culled periodically by soon-to-be exiting orphans to wait on and destroy
- * those that are in the process of or have since exited, which is fast. On
- * global cleanup we wait on and destroy any remaining threads, which may be
- * slow but at that point we cannot defer it any longer.
- */
-struct orphaned_threads {
-  /* Mutex to lock this. To avoid deadlock the thread-specific thread_sync_data
-     mutex cannot be used as an inner lock when orphaned_threads is locked. */
-  curl_mutex_t mutex;
-
-  /* List of orphaned threads. */
-  struct thread_list *first, *last;
-
-  /* Count of threads in the list that are in the process of or have exited.
-     (ie .exiting member of the thread_list item is set true) */
-  size_t exiting_count;
-};
-
-static struct orphaned_threads orphaned_threads;
-
-/* Flags for wait_and_destroy_orphaned_threads().
-   They're documented above the function definition. */
-#define WAIT_DESTROY_ALL                   (1<<0)
-#define WAIT_DESTROY_EXITING_THREADS_ONLY  (1<<1)
-
-static void wait_and_destroy_orphaned_threads(int flags);
-static void signal_orphan_is_exiting(struct thread_list *orphan);
-
-
+/* Doubly linked list of orphaned thread handles. */ 
+struct thread_list { 
+  curl_thread_t handle; 
+ 
+  /* 'exiting' is set true right before an orphaned thread exits. 
+     it should only be set by the orphaned thread from 
+     signal_orphan_is_exiting(). */ 
+  bool exiting; 
+ 
+  struct thread_list *prev, *next; 
+}; 
+ 
+/* Orphaned threads: A global list of resolver threads that could not be 
+ * completed in time and so they were abandoned by their parent. The list is 
+ * culled periodically by soon-to-be exiting orphans to wait on and destroy 
+ * those that are in the process of or have since exited, which is fast. On 
+ * global cleanup we wait on and destroy any remaining threads, which may be 
+ * slow but at that point we cannot defer it any longer. 
+ */ 
+struct orphaned_threads { 
+  /* Mutex to lock this. To avoid deadlock the thread-specific thread_sync_data 
+     mutex cannot be used as an inner lock when orphaned_threads is locked. */ 
+  curl_mutex_t mutex; 
+ 
+  /* List of orphaned threads. */ 
+  struct thread_list *first, *last; 
+ 
+  /* Count of threads in the list that are in the process of or have exited. 
+     (ie .exiting member of the thread_list item is set true) */ 
+  size_t exiting_count; 
+}; 
+ 
+static struct orphaned_threads orphaned_threads; 
+ 
+/* Flags for wait_and_destroy_orphaned_threads(). 
+   They're documented above the function definition. */ 
+#define WAIT_DESTROY_ALL                   (1<<0) 
+#define WAIT_DESTROY_EXITING_THREADS_ONLY  (1<<1) 
+ 
+static void wait_and_destroy_orphaned_threads(int flags); 
+static void signal_orphan_is_exiting(struct thread_list *orphan); 
+ 
+ 
 /*
  * Curl_resolver_global_init()
  * Called from curl_global_init() to initialize global resolver environment.
  */
 int Curl_resolver_global_init(void)
 {
-  memset(&orphaned_threads, 0, sizeof(orphaned_threads));
-
-  if(Curl_mutex_init(&orphaned_threads.mutex))
-    return CURLE_FAILED_INIT;
-
+  memset(&orphaned_threads, 0, sizeof(orphaned_threads)); 
+ 
+  if(Curl_mutex_init(&orphaned_threads.mutex)) 
+    return CURLE_FAILED_INIT; 
+ 
   return CURLE_OK;
 }
 
@@ -147,11 +147,11 @@ int Curl_resolver_global_init(void)
  */
 void Curl_resolver_global_cleanup(void)
 {
-  /* Take ownership of all orphaned resolver threads and wait for them to exit.
-     This is necessary because the user may choose to unload the shared library
-     that is/contains libcurl. */
-  wait_and_destroy_orphaned_threads(WAIT_DESTROY_ALL);
-  Curl_mutex_destroy(&orphaned_threads.mutex);
+  /* Take ownership of all orphaned resolver threads and wait for them to exit. 
+     This is necessary because the user may choose to unload the shared library 
+     that is/contains libcurl. */ 
+  wait_and_destroy_orphaned_threads(WAIT_DESTROY_ALL); 
+  Curl_mutex_destroy(&orphaned_threads.mutex); 
 }
 
 /*
@@ -232,8 +232,8 @@ struct thread_data {
   unsigned int poll_interval;
   timediff_t interval_end;
   struct thread_sync_data tsd;
-  /* 'reserved' memory must be available in case the thread is orphaned */
-  void *reserved;
+  /* 'reserved' memory must be available in case the thread is orphaned */ 
+  void *reserved; 
 };
 
 static struct thread_sync_data *conn_thread_sync_data(struct connectdata *conn)
@@ -295,11 +295,11 @@ int init_thread_sync_data(struct thread_data *td,
   if(tsd->mtx == NULL)
     goto err_exit;
 
-  if(Curl_mutex_init(tsd->mtx)) {
-    free(tsd->mtx);
-    tsd->mtx = NULL;
-    goto err_exit;
-  }
+  if(Curl_mutex_init(tsd->mtx)) { 
+    free(tsd->mtx); 
+    tsd->mtx = NULL; 
+    goto err_exit; 
+  } 
 
 #ifdef USE_SOCKETPAIR
   /* create socket pair, avoid AF_LOCAL since it doesn't build on Solaris */
@@ -353,7 +353,7 @@ static unsigned int CURL_STDCALL getaddrinfo_thread(void *arg)
 {
   struct thread_sync_data *tsd = (struct thread_sync_data *)arg;
   struct thread_data *td = tsd->td;
-  struct thread_list *orphan = NULL;
+  struct thread_list *orphan = NULL; 
   char service[12];
   int rc;
 #ifdef USE_SOCKETPAIR
@@ -378,7 +378,7 @@ static unsigned int CURL_STDCALL getaddrinfo_thread(void *arg)
     /* too late, gotta clean up the mess */
     Curl_mutex_release(tsd->mtx);
     destroy_thread_sync_data(tsd);
-    orphan = (struct thread_list *)td->reserved;
+    orphan = (struct thread_list *)td->reserved; 
     free(td);
   }
   else {
@@ -396,9 +396,9 @@ static unsigned int CURL_STDCALL getaddrinfo_thread(void *arg)
     Curl_mutex_release(tsd->mtx);
   }
 
-  if(orphan)
-    signal_orphan_is_exiting(orphan);
-
+  if(orphan) 
+    signal_orphan_is_exiting(orphan); 
+ 
   return 0;
 }
 
@@ -411,7 +411,7 @@ static unsigned int CURL_STDCALL gethostbyname_thread(void *arg)
 {
   struct thread_sync_data *tsd = (struct thread_sync_data *)arg;
   struct thread_data *td = tsd->td;
-  struct thread_list *orphan = NULL;
+  struct thread_list *orphan = NULL; 
 
   tsd->res = Curl_ipv4_resolve_r(tsd->hostname, tsd->port);
 
@@ -426,7 +426,7 @@ static unsigned int CURL_STDCALL gethostbyname_thread(void *arg)
     /* too late, gotta clean up the mess */
     Curl_mutex_release(tsd->mtx);
     destroy_thread_sync_data(tsd);
-    orphan = (struct thread_list *)td->reserved;
+    orphan = (struct thread_list *)td->reserved; 
     free(td);
   }
   else {
@@ -434,9 +434,9 @@ static unsigned int CURL_STDCALL gethostbyname_thread(void *arg)
     Curl_mutex_release(tsd->mtx);
   }
 
-  if(orphan)
-    signal_orphan_is_exiting(orphan);
-
+  if(orphan) 
+    signal_orphan_is_exiting(orphan); 
+ 
   return 0;
 }
 
@@ -455,60 +455,60 @@ static void destroy_async_data(struct Curl_async *async)
     struct connectdata *conn = td->tsd.conn;
 #endif
 
-    /* We can't wait any longer for the resolver thread so if it's not done
-     * then it must be orphaned.
-     *
-     * 1) add thread to orphaned threads list
-     * 2) set thread done (this signals to thread it has been orphaned)
-     *
-     * An orphaned thread does most of its own cleanup, and any remaining
-     * cleanup is handled during global cleanup.
+    /* We can't wait any longer for the resolver thread so if it's not done 
+     * then it must be orphaned. 
+     * 
+     * 1) add thread to orphaned threads list 
+     * 2) set thread done (this signals to thread it has been orphaned) 
+     * 
+     * An orphaned thread does most of its own cleanup, and any remaining 
+     * cleanup is handled during global cleanup. 
      */
-
+ 
     Curl_mutex_acquire(td->tsd.mtx);
-
-    if(!td->tsd.done && td->thread_hnd != curl_thread_t_null) {
-      struct thread_list *orphan = (struct thread_list *)td->reserved;
-
-      Curl_mutex_acquire(&orphaned_threads.mutex);
-
-#ifdef DEBUGBUILD
-      {
-        struct thread_list empty;
-        memset(&empty, 0, sizeof(empty));
-        DEBUGASSERT(!memcmp(&empty, orphan, sizeof(empty)));
-      }
-#endif
-
-      orphan->handle = td->thread_hnd;
-      orphan->exiting = false;
-
-      if(orphaned_threads.last) {
-        orphaned_threads.last->next = orphan;
-        orphan->prev = orphaned_threads.last;
-      }
-      else {
-        orphaned_threads.first = orphan;
-        orphan->prev = NULL;
-      }
-      orphaned_threads.last = orphan;
-      orphan->next = NULL;
-
-      Curl_mutex_release(&orphaned_threads.mutex);
-    }
-
+ 
+    if(!td->tsd.done && td->thread_hnd != curl_thread_t_null) { 
+      struct thread_list *orphan = (struct thread_list *)td->reserved; 
+ 
+      Curl_mutex_acquire(&orphaned_threads.mutex); 
+ 
+#ifdef DEBUGBUILD 
+      { 
+        struct thread_list empty; 
+        memset(&empty, 0, sizeof(empty)); 
+        DEBUGASSERT(!memcmp(&empty, orphan, sizeof(empty))); 
+      } 
+#endif 
+ 
+      orphan->handle = td->thread_hnd; 
+      orphan->exiting = false; 
+ 
+      if(orphaned_threads.last) { 
+        orphaned_threads.last->next = orphan; 
+        orphan->prev = orphaned_threads.last; 
+      } 
+      else { 
+        orphaned_threads.first = orphan; 
+        orphan->prev = NULL; 
+      } 
+      orphaned_threads.last = orphan; 
+      orphan->next = NULL; 
+ 
+      Curl_mutex_release(&orphaned_threads.mutex); 
+    } 
+ 
     done = td->tsd.done;
     td->tsd.done = 1;
-
+ 
     Curl_mutex_release(td->tsd.mtx);
 
-    if(done) {
+    if(done) { 
       if(td->thread_hnd != curl_thread_t_null)
         Curl_thread_join(&td->thread_hnd);
 
       destroy_thread_sync_data(&td->tsd);
-      free(td->reserved);
-      free(td);
+      free(td->reserved); 
+      free(td); 
     }
 #ifdef USE_SOCKETPAIR
     /*
@@ -548,11 +548,11 @@ static bool init_resolve_thread(struct connectdata *conn,
   conn->async.status = 0;
   conn->async.dns = NULL;
   td->thread_hnd = curl_thread_t_null;
-  td->reserved = calloc(1, sizeof(struct thread_list));
+  td->reserved = calloc(1, sizeof(struct thread_list)); 
 
-  if(!td->reserved || !init_thread_sync_data(td, hostname, port, hints)) {
+  if(!td->reserved || !init_thread_sync_data(td, hostname, port, hints)) { 
     conn->async.tdata = NULL;
-    free(td->reserved);
+    free(td->reserved); 
     free(td);
     goto errno_exit;
   }
@@ -571,7 +571,7 @@ static bool init_resolve_thread(struct connectdata *conn,
   td->thread_hnd = Curl_thread_create(gethostbyname_thread, &td->tsd);
 #endif
 
-  if(td->thread_hnd == curl_thread_t_null) {
+  if(td->thread_hnd == curl_thread_t_null) { 
     /* The thread never started, so mark it as done here for proper cleanup. */
     td->tsd.done = 1;
     err = errno;
@@ -910,100 +910,100 @@ CURLcode Curl_set_dns_local_ip6(struct Curl_easy *data,
   return CURLE_NOT_BUILT_IN;
 }
 
-/* Helper function to wait and destroy some or all orphaned threads.
- *
- * WAIT_DESTROY_ALL:
- * Wait and destroy all orphaned threads. This operation is not safe to specify
- * in code that could run in any thread that may be orphaned (ie any resolver
- * thread). Waiting on all orphaned threads may take some time. This operation
- * must be specified in the call from global cleanup, and ideally nowhere else.
- *
- * WAIT_DESTROY_EXITING_THREADS_ONLY:
- * Wait and destroy only orphaned threads that are in the process of or have
- * since exited (ie those with .exiting set true). This is fast.
- *
- * When the calling thread owns orphaned_threads.mutex it must not call this
- * function or deadlock my occur.
- */
-static void wait_and_destroy_orphaned_threads(int flags)
-{
-  struct thread_list *thread = NULL;
-
-  Curl_mutex_acquire(&orphaned_threads.mutex);
-
-  if((flags & WAIT_DESTROY_EXITING_THREADS_ONLY)) {
-    struct thread_list *p, *next;
-    struct thread_list *first = NULL, *last = NULL;
-
-    if(!orphaned_threads.exiting_count) {
-      Curl_mutex_release(&orphaned_threads.mutex);
-      return;
-    }
-
-    for(p = orphaned_threads.first; p; p = next) {
-      next = p->next;
-
-      if(!p->exiting)
-        continue;
-
-      /* remove thread list item from orphaned_threads */
-      if(p->prev)
-        p->prev->next = p->next;
-      if(p->next)
-        p->next->prev = p->prev;
-      if(orphaned_threads.first == p)
-        orphaned_threads.first = p->next;
-      if(orphaned_threads.last == p)
-        orphaned_threads.last = p->prev;
-
-      /* add thread list item to new thread list */
-      if(last) {
-        last->next = p;
-        p->prev = last;
-      }
-      else {
-        first = p;
-        p->prev = NULL;
-      }
-      last = p;
-      p->next = NULL;
-    }
-
-    thread = first;
-    orphaned_threads.exiting_count = 0;
-  }
-  else if((flags & WAIT_DESTROY_ALL)) {
-    thread = orphaned_threads.first;
-    orphaned_threads.first = NULL;
-    orphaned_threads.last = NULL;
-    orphaned_threads.exiting_count = 0;
-  }
-
-  Curl_mutex_release(&orphaned_threads.mutex);
-
-  /* Wait and free. Must be done unlocked or there could be deadlock. */
-  while(thread) {
-    struct thread_list *next = thread->next;
-    Curl_thread_join(&thread->handle);
-    free(thread);
-    thread = next;
-  }
-}
-
-/* Helper function that must be called from an orphaned thread right before it
-   exits. */
-static void signal_orphan_is_exiting(struct thread_list *orphan)
-{
-  DEBUGASSERT(orphan->handle && !orphan->exiting);
-
-  wait_and_destroy_orphaned_threads(WAIT_DESTROY_EXITING_THREADS_ONLY);
-
-  Curl_mutex_acquire(&orphaned_threads.mutex);
-
-  orphan->exiting = true;
-  orphaned_threads.exiting_count++;
-
-  Curl_mutex_release(&orphaned_threads.mutex);
-}
-
+/* Helper function to wait and destroy some or all orphaned threads. 
+ * 
+ * WAIT_DESTROY_ALL: 
+ * Wait and destroy all orphaned threads. This operation is not safe to specify 
+ * in code that could run in any thread that may be orphaned (ie any resolver 
+ * thread). Waiting on all orphaned threads may take some time. This operation 
+ * must be specified in the call from global cleanup, and ideally nowhere else. 
+ * 
+ * WAIT_DESTROY_EXITING_THREADS_ONLY: 
+ * Wait and destroy only orphaned threads that are in the process of or have 
+ * since exited (ie those with .exiting set true). This is fast. 
+ * 
+ * When the calling thread owns orphaned_threads.mutex it must not call this 
+ * function or deadlock my occur. 
+ */ 
+static void wait_and_destroy_orphaned_threads(int flags) 
+{ 
+  struct thread_list *thread = NULL; 
+ 
+  Curl_mutex_acquire(&orphaned_threads.mutex); 
+ 
+  if((flags & WAIT_DESTROY_EXITING_THREADS_ONLY)) { 
+    struct thread_list *p, *next; 
+    struct thread_list *first = NULL, *last = NULL; 
+ 
+    if(!orphaned_threads.exiting_count) { 
+      Curl_mutex_release(&orphaned_threads.mutex); 
+      return; 
+    } 
+ 
+    for(p = orphaned_threads.first; p; p = next) { 
+      next = p->next; 
+ 
+      if(!p->exiting) 
+        continue; 
+ 
+      /* remove thread list item from orphaned_threads */ 
+      if(p->prev) 
+        p->prev->next = p->next; 
+      if(p->next) 
+        p->next->prev = p->prev; 
+      if(orphaned_threads.first == p) 
+        orphaned_threads.first = p->next; 
+      if(orphaned_threads.last == p) 
+        orphaned_threads.last = p->prev; 
+ 
+      /* add thread list item to new thread list */ 
+      if(last) { 
+        last->next = p; 
+        p->prev = last; 
+      } 
+      else { 
+        first = p; 
+        p->prev = NULL; 
+      } 
+      last = p; 
+      p->next = NULL; 
+    } 
+ 
+    thread = first; 
+    orphaned_threads.exiting_count = 0; 
+  } 
+  else if((flags & WAIT_DESTROY_ALL)) { 
+    thread = orphaned_threads.first; 
+    orphaned_threads.first = NULL; 
+    orphaned_threads.last = NULL; 
+    orphaned_threads.exiting_count = 0; 
+  } 
+ 
+  Curl_mutex_release(&orphaned_threads.mutex); 
+ 
+  /* Wait and free. Must be done unlocked or there could be deadlock. */ 
+  while(thread) { 
+    struct thread_list *next = thread->next; 
+    Curl_thread_join(&thread->handle); 
+    free(thread); 
+    thread = next; 
+  } 
+} 
+ 
+/* Helper function that must be called from an orphaned thread right before it 
+   exits. */ 
+static void signal_orphan_is_exiting(struct thread_list *orphan) 
+{ 
+  DEBUGASSERT(orphan->handle && !orphan->exiting); 
+ 
+  wait_and_destroy_orphaned_threads(WAIT_DESTROY_EXITING_THREADS_ONLY); 
+ 
+  Curl_mutex_acquire(&orphaned_threads.mutex); 
+ 
+  orphan->exiting = true; 
+  orphaned_threads.exiting_count++; 
+ 
+  Curl_mutex_release(&orphaned_threads.mutex); 
+} 
+ 
 #endif /* CURLRES_THREADED */

+ 3 - 3
contrib/libs/curl/lib/curl_config-android-maps-mobile.h

@@ -540,7 +540,7 @@
 /* #undef HAVE_PROTO_BSDSOCKET_H */
 
 /* if you have <pthread.h> */
-#define HAVE_PTHREAD_H 1
+#define HAVE_PTHREAD_H 1 
 
 /* Define to 1 if you have the <pwd.h> header file. */
 #define HAVE_PWD_H 1
@@ -924,7 +924,7 @@
 /* #undef USE_AMISSL */
 
 /* Define to enable c-ares support */
-/* #define USE_ARES 1 */
+/* #define USE_ARES 1 */ 
 
 /* if GnuTLS is enabled */
 /* #undef USE_GNUTLS */
@@ -975,7 +975,7 @@
 /* #undef USE_SECTRANSP */
 
 /* if you want POSIX threaded DNS lookup */
-#define USE_THREADS_POSIX 1
+#define USE_THREADS_POSIX 1 
 
 /* if you want Win32 threaded DNS lookup */
 /* #undef USE_THREADS_WIN32 */

+ 3 - 3
contrib/libs/curl/lib/curl_config-ios-maps-mobile.h

@@ -540,7 +540,7 @@
 /* #undef HAVE_PROTO_BSDSOCKET_H */
 
 /* if you have <pthread.h> */
-#define HAVE_PTHREAD_H 1
+#define HAVE_PTHREAD_H 1 
 
 /* Define to 1 if you have the <pwd.h> header file. */
 #define HAVE_PWD_H 1
@@ -924,7 +924,7 @@
 /* #undef USE_AMISSL */
 
 /* Define to enable c-ares support */
-/* #define USE_ARES 1 */
+/* #define USE_ARES 1 */ 
 
 /* if GnuTLS is enabled */
 /* #undef USE_GNUTLS */
@@ -975,7 +975,7 @@
 /* #undef USE_SECTRANSP */
 
 /* if you want POSIX threaded DNS lookup */
-#define USE_THREADS_POSIX 1
+#define USE_THREADS_POSIX 1 
 
 /* if you want Win32 threaded DNS lookup */
 /* #undef USE_THREADS_WIN32 */

+ 4 - 4
contrib/libs/curl/lib/curl_threads.h

@@ -37,14 +37,14 @@
 #  define curl_mutex_t           CRITICAL_SECTION
 #  define curl_thread_t          HANDLE
 #  define curl_thread_t_null     (HANDLE)0
-/* The Windows init macro is made to return 0 on success so that it behaves the
-   same as pthreads init which returns 0 on success. */
+/* The Windows init macro is made to return 0 on success so that it behaves the 
+   same as pthreads init which returns 0 on success. */ 
 #  if !defined(_WIN32_WINNT) || !defined(_WIN32_WINNT_VISTA) || \
       (_WIN32_WINNT < _WIN32_WINNT_VISTA) || \
       (defined(__MINGW32__) && !defined(__MINGW64_VERSION_MAJOR))
-#    define Curl_mutex_init(m)   (InitializeCriticalSection(m), 0)
+#    define Curl_mutex_init(m)   (InitializeCriticalSection(m), 0) 
 #  else
-#    define Curl_mutex_init(m)   (!InitializeCriticalSectionEx(m, 0, 1))
+#    define Curl_mutex_init(m)   (!InitializeCriticalSectionEx(m, 0, 1)) 
 #  endif
 #  define Curl_mutex_acquire(m)  EnterCriticalSection(m)
 #  define Curl_mutex_release(m)  LeaveCriticalSection(m)

+ 7 - 7
contrib/libs/curl/lib/multi.c

@@ -557,10 +557,10 @@ static CURLcode multi_done(struct Curl_easy *data,
 
   conn->data = data; /* ensure the connection uses this transfer now */
 
-  /* Cancel the resolver (but not dns_entry yet). We used to call
-     Curl_resolver_kill here but that blocks waiting for incomplete resolve
-     threads (eg getaddrinfo has not returned), which may take a while. */
-  Curl_resolver_cancel(conn);
+  /* Cancel the resolver (but not dns_entry yet). We used to call 
+     Curl_resolver_kill here but that blocks waiting for incomplete resolve 
+     threads (eg getaddrinfo has not returned), which may take a while. */ 
+  Curl_resolver_cancel(conn); 
 
   /* Cleanup possible redirect junk */
   Curl_safefree(data->req.newurl);
@@ -599,9 +599,9 @@ static CURLcode multi_done(struct Curl_easy *data,
   Curl_detach_connnection(data);
   if(CONN_INUSE(conn)) {
     /* Stop if still used. */
-    /* conn->data must not remain pointing to this transfer since it is going
-       away! Find another to own it! */
-    conn->data = conn->easyq.head->ptr;
+    /* conn->data must not remain pointing to this transfer since it is going 
+       away! Find another to own it! */ 
+    conn->data = conn->easyq.head->ptr; 
     CONNCACHE_UNLOCK(data);
     DEBUGF(infof(data, "Connection still in use %zu, "
                  "no more multi_done now!\n",

+ 2 - 2
contrib/libs/curl/lib/url.c

@@ -1249,8 +1249,8 @@ ConnectionExists(struct Curl_easy *data,
       }
 #endif
 
-      DEBUGASSERT(!check->data || GOOD_EASY_HANDLE(check->data));
-
+      DEBUGASSERT(!check->data || GOOD_EASY_HANDLE(check->data)); 
+ 
       if(!canmultiplex && check->data)
         /* this request can't be multiplexed but the checked connection is
            already in use so we skip it */

+ 127 - 127
contrib/libs/nghttp2/AUTHORS

@@ -1,143 +1,143 @@
-nghttp2 project was started as a fork of spdylay project [1].  Both
-projects were started by Tatsuhiro Tsujikawa, who is still the main
-author of these projects.  Meanwhile, we have many contributions, and
-we are not here without them.  We sincerely thank you to all who made
-a contribution.  Here is the all individuals/organizations who
-contributed to nghttp2 and spdylay project at which we forked.  These
-names are retrieved from git commit log.  If you have made a
-contribution, but you are missing in the list, please let us know via
-github issues [2].
-
-[1] https://github.com/tatsuhiro-t/spdylay
-[2] https://github.com/nghttp2/nghttp2/issues
-
---------
-
-187j3x1
-Adam Gołębiowski
-Alek Storm
-Alex Nalivko
-Alexandros Konstantinakis-Karmis
-Alexis La Goutte
+nghttp2 project was started as a fork of spdylay project [1].  Both 
+projects were started by Tatsuhiro Tsujikawa, who is still the main 
+author of these projects.  Meanwhile, we have many contributions, and 
+we are not here without them.  We sincerely thank you to all who made 
+a contribution.  Here is the all individuals/organizations who 
+contributed to nghttp2 and spdylay project at which we forked.  These 
+names are retrieved from git commit log.  If you have made a 
+contribution, but you are missing in the list, please let us know via 
+github issues [2]. 
+ 
+[1] https://github.com/tatsuhiro-t/spdylay 
+[2] https://github.com/nghttp2/nghttp2/issues 
+ 
+-------- 
+ 
+187j3x1 
+Adam Gołębiowski 
+Alek Storm 
+Alex Nalivko 
+Alexandros Konstantinakis-Karmis 
+Alexis La Goutte 
 Amir Livneh
-Amir Pakdel
-Anders Bakken
-Andreas Pohl
-Andrew Penkrat
-Andy Davies
-Angus Gratton
-Anna Henningsen
-Ant Bryan
+Amir Pakdel 
+Anders Bakken 
+Andreas Pohl 
+Andrew Penkrat 
+Andy Davies 
+Angus Gratton 
+Anna Henningsen 
+Ant Bryan 
 Asra Ali
-Benedikt Christoph Wolters
-Benjamin Peterson
-Bernard Spil
-Brendan Heinonen
-Brian Card
-Brian Suh
+Benedikt Christoph Wolters 
+Benjamin Peterson 
+Bernard Spil 
+Brendan Heinonen 
+Brian Card 
+Brian Suh 
 Daniel Bevenius
-Daniel Evers
-Daniel Stenberg
-Dave Reisner
-David Beitey
-David Weekly
+Daniel Evers 
+Daniel Stenberg 
+Dave Reisner 
+David Beitey 
+David Weekly 
 Dmitri Tikhonov
-Dmitriy Vetutnev
-Don
-Dylan Plecki
-Etienne Cimon
-Fabian Möller
-Fabian Wiesel
-Gabi Davar
+Dmitriy Vetutnev 
+Don 
+Dylan Plecki 
+Etienne Cimon 
+Fabian Möller 
+Fabian Wiesel 
+Gabi Davar 
 Gaël PORTAY
 Geoff Hill
 George Liu
-Gitai
-Google Inc.
+Gitai 
+Google Inc. 
 Hajime Fujita
 Jacky Tian
 Jacky_Yin
-Jacob Champion
+Jacob Champion 
 James M Snell
-Jan Kundrát
-Jan-E
-Janusz Dziemidowicz
-Jay Satiro
-Jeff 'Raid' Baitis
-Jianqing Wang
-Jim Morrison
-Josh Braegger
-José F. Calcerrada
-Kamil Dudka
-Kazuho Oku
-Kenny (kang-yen) Peng
-Kenny Peng
-Kit Chan
-Kyle Schomp
-LazyHamster
+Jan Kundrát 
+Jan-E 
+Janusz Dziemidowicz 
+Jay Satiro 
+Jeff 'Raid' Baitis 
+Jianqing Wang 
+Jim Morrison 
+Josh Braegger 
+José F. Calcerrada 
+Kamil Dudka 
+Kazuho Oku 
+Kenny (kang-yen) Peng 
+Kenny Peng 
+Kit Chan 
+Kyle Schomp 
+LazyHamster 
 Leo Neat
 Lorenz Nickel
-Lucas Pardue
-MATSUMOTO Ryosuke
-Marc Bachmann
-Matt Rudary
-Matt Way
+Lucas Pardue 
+MATSUMOTO Ryosuke 
+Marc Bachmann 
+Matt Rudary 
+Matt Way 
 Michael Kaufmann
-Mike Conlen
-Mike Frysinger
-Mike Lothian
-Nicholas Hurley
-Nora Shoemaker
-Pedro Santos
-Peeyush Aggarwal
-Peter Wu
-Piotr Sikora
-Raul Gutierrez Segales
-Remo E
-Reza Tavakoli
-Richard Wolfert
-Rick Lei
-Ross Smith II
-Scott Mitchell
-Sebastiaan Deckers
-Simon Frankenberger
-Simone Basso
-Soham Sinha
-Stefan Eissing
-Stephen Ludin
-Sunpoet Po-Chuan Hsieh
-Svante Signell
-Syohei YOSHIDA
-Tapanito
-Tatsuhiko Kubo
-Tatsuhiro Tsujikawa
-Tobias Geerinckx-Rice
-Tom Harwood
+Mike Conlen 
+Mike Frysinger 
+Mike Lothian 
+Nicholas Hurley 
+Nora Shoemaker 
+Pedro Santos 
+Peeyush Aggarwal 
+Peter Wu 
+Piotr Sikora 
+Raul Gutierrez Segales 
+Remo E 
+Reza Tavakoli 
+Richard Wolfert 
+Rick Lei 
+Ross Smith II 
+Scott Mitchell 
+Sebastiaan Deckers 
+Simon Frankenberger 
+Simone Basso 
+Soham Sinha 
+Stefan Eissing 
+Stephen Ludin 
+Sunpoet Po-Chuan Hsieh 
+Svante Signell 
+Syohei YOSHIDA 
+Tapanito 
+Tatsuhiko Kubo 
+Tatsuhiro Tsujikawa 
+Tobias Geerinckx-Rice 
+Tom Harwood 
 Tomas Krizek
-Tomasz Buchert
-Tomasz Torcz
-Vernon Tang
-Viacheslav Biriukov
-Viktor Szakats
-Viktor Szépe
-Wenfeng Liu
-William A Rowe Jr
-Xiaoguang Sun
-Zhuoyun Wei
-acesso
-ayanamist
-bxshi
-clemahieu
-dalf
-dawg
-es
-fangdingjun
-jwchoi
-kumagi
+Tomasz Buchert 
+Tomasz Torcz 
+Vernon Tang 
+Viacheslav Biriukov 
+Viktor Szakats 
+Viktor Szépe 
+Wenfeng Liu 
+William A Rowe Jr 
+Xiaoguang Sun 
+Zhuoyun Wei 
+acesso 
+ayanamist 
+bxshi 
+clemahieu 
+dalf 
+dawg 
+es 
+fangdingjun 
+jwchoi 
+kumagi 
 lhuang04
-lstefani
-makovich
-mod-h2-dev
-moparisthebest
-snnn
-yuuki-kodama
+lstefani 
+makovich 
+mod-h2-dev 
+moparisthebest 
+snnn 
+yuuki-kodama 

+ 368 - 368
contrib/libs/nghttp2/INSTALL

@@ -1,368 +1,368 @@
-Installation Instructions
-*************************
-
-   Copyright (C) 1994-1996, 1999-2002, 2004-2016 Free Software
-Foundation, Inc.
-
-   Copying and distribution of this file, with or without modification,
-are permitted in any medium without royalty provided the copyright
-notice and this notice are preserved.  This file is offered as-is,
-without warranty of any kind.
-
-Basic Installation
-==================
-
-   Briefly, the shell command './configure && make && make install'
-should configure, build, and install this package.  The following
-more-detailed instructions are generic; see the 'README' file for
-instructions specific to this package.  Some packages provide this
-'INSTALL' file but do not implement all of the features documented
-below.  The lack of an optional feature in a given package is not
-necessarily a bug.  More recommendations for GNU packages can be found
-in *note Makefile Conventions: (standards)Makefile Conventions.
-
-   The 'configure' shell script attempts to guess correct values for
-various system-dependent variables used during compilation.  It uses
-those values to create a 'Makefile' in each directory of the package.
-It may also create one or more '.h' files containing system-dependent
-definitions.  Finally, it creates a shell script 'config.status' that
-you can run in the future to recreate the current configuration, and a
-file 'config.log' containing compiler output (useful mainly for
-debugging 'configure').
-
-   It can also use an optional file (typically called 'config.cache' and
-enabled with '--cache-file=config.cache' or simply '-C') that saves the
-results of its tests to speed up reconfiguring.  Caching is disabled by
-default to prevent problems with accidental use of stale cache files.
-
-   If you need to do unusual things to compile the package, please try
-to figure out how 'configure' could check whether to do them, and mail
-diffs or instructions to the address given in the 'README' so they can
-be considered for the next release.  If you are using the cache, and at
-some point 'config.cache' contains results you don't want to keep, you
-may remove or edit it.
-
-   The file 'configure.ac' (or 'configure.in') is used to create
-'configure' by a program called 'autoconf'.  You need 'configure.ac' if
-you want to change it or regenerate 'configure' using a newer version of
-'autoconf'.
-
-   The simplest way to compile this package is:
-
-  1. 'cd' to the directory containing the package's source code and type
-     './configure' to configure the package for your system.
-
-     Running 'configure' might take a while.  While running, it prints
-     some messages telling which features it is checking for.
-
-  2. Type 'make' to compile the package.
-
-  3. Optionally, type 'make check' to run any self-tests that come with
-     the package, generally using the just-built uninstalled binaries.
-
-  4. Type 'make install' to install the programs and any data files and
-     documentation.  When installing into a prefix owned by root, it is
-     recommended that the package be configured and built as a regular
-     user, and only the 'make install' phase executed with root
-     privileges.
-
-  5. Optionally, type 'make installcheck' to repeat any self-tests, but
-     this time using the binaries in their final installed location.
-     This target does not install anything.  Running this target as a
-     regular user, particularly if the prior 'make install' required
-     root privileges, verifies that the installation completed
-     correctly.
-
-  6. You can remove the program binaries and object files from the
-     source code directory by typing 'make clean'.  To also remove the
-     files that 'configure' created (so you can compile the package for
-     a different kind of computer), type 'make distclean'.  There is
-     also a 'make maintainer-clean' target, but that is intended mainly
-     for the package's developers.  If you use it, you may have to get
-     all sorts of other programs in order to regenerate files that came
-     with the distribution.
-
-  7. Often, you can also type 'make uninstall' to remove the installed
-     files again.  In practice, not all packages have tested that
-     uninstallation works correctly, even though it is required by the
-     GNU Coding Standards.
-
-  8. Some packages, particularly those that use Automake, provide 'make
-     distcheck', which can by used by developers to test that all other
-     targets like 'make install' and 'make uninstall' work correctly.
-     This target is generally not run by end users.
-
-Compilers and Options
-=====================
-
-   Some systems require unusual options for compilation or linking that
-the 'configure' script does not know about.  Run './configure --help'
-for details on some of the pertinent environment variables.
-
-   You can give 'configure' initial values for configuration parameters
-by setting variables in the command line or in the environment.  Here is
-an example:
-
-     ./configure CC=c99 CFLAGS=-g LIBS=-lposix
-
-   *Note Defining Variables::, for more details.
-
-Compiling For Multiple Architectures
-====================================
-
-   You can compile the package for more than one kind of computer at the
-same time, by placing the object files for each architecture in their
-own directory.  To do this, you can use GNU 'make'.  'cd' to the
-directory where you want the object files and executables to go and run
-the 'configure' script.  'configure' automatically checks for the source
-code in the directory that 'configure' is in and in '..'.  This is known
-as a "VPATH" build.
-
-   With a non-GNU 'make', it is safer to compile the package for one
-architecture at a time in the source code directory.  After you have
-installed the package for one architecture, use 'make distclean' before
-reconfiguring for another architecture.
-
-   On MacOS X 10.5 and later systems, you can create libraries and
-executables that work on multiple system types--known as "fat" or
-"universal" binaries--by specifying multiple '-arch' options to the
-compiler but only a single '-arch' option to the preprocessor.  Like
-this:
-
-     ./configure CC="gcc -arch i386 -arch x86_64 -arch ppc -arch ppc64" \
-                 CXX="g++ -arch i386 -arch x86_64 -arch ppc -arch ppc64" \
-                 CPP="gcc -E" CXXCPP="g++ -E"
-
-   This is not guaranteed to produce working output in all cases, you
-may have to build one architecture at a time and combine the results
-using the 'lipo' tool if you have problems.
-
-Installation Names
-==================
-
-   By default, 'make install' installs the package's commands under
-'/usr/local/bin', include files under '/usr/local/include', etc.  You
-can specify an installation prefix other than '/usr/local' by giving
-'configure' the option '--prefix=PREFIX', where PREFIX must be an
-absolute file name.
-
-   You can specify separate installation prefixes for
-architecture-specific files and architecture-independent files.  If you
-pass the option '--exec-prefix=PREFIX' to 'configure', the package uses
-PREFIX as the prefix for installing programs and libraries.
-Documentation and other data files still use the regular prefix.
-
-   In addition, if you use an unusual directory layout you can give
-options like '--bindir=DIR' to specify different values for particular
-kinds of files.  Run 'configure --help' for a list of the directories
-you can set and what kinds of files go in them.  In general, the default
-for these options is expressed in terms of '${prefix}', so that
-specifying just '--prefix' will affect all of the other directory
-specifications that were not explicitly provided.
-
-   The most portable way to affect installation locations is to pass the
-correct locations to 'configure'; however, many packages provide one or
-both of the following shortcuts of passing variable assignments to the
-'make install' command line to change installation locations without
-having to reconfigure or recompile.
-
-   The first method involves providing an override variable for each
-affected directory.  For example, 'make install
-prefix=/alternate/directory' will choose an alternate location for all
-directory configuration variables that were expressed in terms of
-'${prefix}'.  Any directories that were specified during 'configure',
-but not in terms of '${prefix}', must each be overridden at install time
-for the entire installation to be relocated.  The approach of makefile
-variable overrides for each directory variable is required by the GNU
-Coding Standards, and ideally causes no recompilation.  However, some
-platforms have known limitations with the semantics of shared libraries
-that end up requiring recompilation when using this method, particularly
-noticeable in packages that use GNU Libtool.
-
-   The second method involves providing the 'DESTDIR' variable.  For
-example, 'make install DESTDIR=/alternate/directory' will prepend
-'/alternate/directory' before all installation names.  The approach of
-'DESTDIR' overrides is not required by the GNU Coding Standards, and
-does not work on platforms that have drive letters.  On the other hand,
-it does better at avoiding recompilation issues, and works well even
-when some directory options were not specified in terms of '${prefix}'
-at 'configure' time.
-
-Optional Features
-=================
-
-   If the package supports it, you can cause programs to be installed
-with an extra prefix or suffix on their names by giving 'configure' the
-option '--program-prefix=PREFIX' or '--program-suffix=SUFFIX'.
-
-   Some packages pay attention to '--enable-FEATURE' options to
-'configure', where FEATURE indicates an optional part of the package.
-They may also pay attention to '--with-PACKAGE' options, where PACKAGE
-is something like 'gnu-as' or 'x' (for the X Window System).  The
-'README' should mention any '--enable-' and '--with-' options that the
-package recognizes.
-
-   For packages that use the X Window System, 'configure' can usually
-find the X include and library files automatically, but if it doesn't,
-you can use the 'configure' options '--x-includes=DIR' and
-'--x-libraries=DIR' to specify their locations.
-
-   Some packages offer the ability to configure how verbose the
-execution of 'make' will be.  For these packages, running './configure
---enable-silent-rules' sets the default to minimal output, which can be
-overridden with 'make V=1'; while running './configure
---disable-silent-rules' sets the default to verbose, which can be
-overridden with 'make V=0'.
-
-Particular systems
-==================
-
-   On HP-UX, the default C compiler is not ANSI C compatible.  If GNU CC
-is not installed, it is recommended to use the following options in
-order to use an ANSI C compiler:
-
-     ./configure CC="cc -Ae -D_XOPEN_SOURCE=500"
-
-and if that doesn't work, install pre-built binaries of GCC for HP-UX.
-
-   HP-UX 'make' updates targets which have the same time stamps as their
-prerequisites, which makes it generally unusable when shipped generated
-files such as 'configure' are involved.  Use GNU 'make' instead.
-
-   On OSF/1 a.k.a. Tru64, some versions of the default C compiler cannot
-parse its '<wchar.h>' header file.  The option '-nodtk' can be used as a
-workaround.  If GNU CC is not installed, it is therefore recommended to
-try
-
-     ./configure CC="cc"
-
-and if that doesn't work, try
-
-     ./configure CC="cc -nodtk"
-
-   On Solaris, don't put '/usr/ucb' early in your 'PATH'.  This
-directory contains several dysfunctional programs; working variants of
-these programs are available in '/usr/bin'.  So, if you need '/usr/ucb'
-in your 'PATH', put it _after_ '/usr/bin'.
-
-   On Haiku, software installed for all users goes in '/boot/common',
-not '/usr/local'.  It is recommended to use the following options:
-
-     ./configure --prefix=/boot/common
-
-Specifying the System Type
-==========================
-
-   There may be some features 'configure' cannot figure out
-automatically, but needs to determine by the type of machine the package
-will run on.  Usually, assuming the package is built to be run on the
-_same_ architectures, 'configure' can figure that out, but if it prints
-a message saying it cannot guess the machine type, give it the
-'--build=TYPE' option.  TYPE can either be a short name for the system
-type, such as 'sun4', or a canonical name which has the form:
-
-     CPU-COMPANY-SYSTEM
-
-where SYSTEM can have one of these forms:
-
-     OS
-     KERNEL-OS
-
-   See the file 'config.sub' for the possible values of each field.  If
-'config.sub' isn't included in this package, then this package doesn't
-need to know the machine type.
-
-   If you are _building_ compiler tools for cross-compiling, you should
-use the option '--target=TYPE' to select the type of system they will
-produce code for.
-
-   If you want to _use_ a cross compiler, that generates code for a
-platform different from the build platform, you should specify the
-"host" platform (i.e., that on which the generated programs will
-eventually be run) with '--host=TYPE'.
-
-Sharing Defaults
-================
-
-   If you want to set default values for 'configure' scripts to share,
-you can create a site shell script called 'config.site' that gives
-default values for variables like 'CC', 'cache_file', and 'prefix'.
-'configure' looks for 'PREFIX/share/config.site' if it exists, then
-'PREFIX/etc/config.site' if it exists.  Or, you can set the
-'CONFIG_SITE' environment variable to the location of the site script.
-A warning: not all 'configure' scripts look for a site script.
-
-Defining Variables
-==================
-
-   Variables not defined in a site shell script can be set in the
-environment passed to 'configure'.  However, some packages may run
-configure again during the build, and the customized values of these
-variables may be lost.  In order to avoid this problem, you should set
-them in the 'configure' command line, using 'VAR=value'.  For example:
-
-     ./configure CC=/usr/local2/bin/gcc
-
-causes the specified 'gcc' to be used as the C compiler (unless it is
-overridden in the site shell script).
-
-Unfortunately, this technique does not work for 'CONFIG_SHELL' due to an
-Autoconf limitation.  Until the limitation is lifted, you can use this
-workaround:
-
-     CONFIG_SHELL=/bin/bash ./configure CONFIG_SHELL=/bin/bash
-
-'configure' Invocation
-======================
-
-   'configure' recognizes the following options to control how it
-operates.
-
-'--help'
-'-h'
-     Print a summary of all of the options to 'configure', and exit.
-
-'--help=short'
-'--help=recursive'
-     Print a summary of the options unique to this package's
-     'configure', and exit.  The 'short' variant lists options used only
-     in the top level, while the 'recursive' variant lists options also
-     present in any nested packages.
-
-'--version'
-'-V'
-     Print the version of Autoconf used to generate the 'configure'
-     script, and exit.
-
-'--cache-file=FILE'
-     Enable the cache: use and save the results of the tests in FILE,
-     traditionally 'config.cache'.  FILE defaults to '/dev/null' to
-     disable caching.
-
-'--config-cache'
-'-C'
-     Alias for '--cache-file=config.cache'.
-
-'--quiet'
-'--silent'
-'-q'
-     Do not print messages saying which checks are being made.  To
-     suppress all normal output, redirect it to '/dev/null' (any error
-     messages will still be shown).
-
-'--srcdir=DIR'
-     Look for the package's source code in directory DIR.  Usually
-     'configure' can determine that directory automatically.
-
-'--prefix=DIR'
-     Use DIR as the installation prefix.  *note Installation Names:: for
-     more details, including other options available for fine-tuning the
-     installation locations.
-
-'--no-create'
-'-n'
-     Run the configure checks, but stop before creating any output
-     files.
-
-'configure' also accepts some other, not widely useful, options.  Run
-'configure --help' for more details.
+Installation Instructions 
+************************* 
+ 
+   Copyright (C) 1994-1996, 1999-2002, 2004-2016 Free Software 
+Foundation, Inc. 
+ 
+   Copying and distribution of this file, with or without modification, 
+are permitted in any medium without royalty provided the copyright 
+notice and this notice are preserved.  This file is offered as-is, 
+without warranty of any kind. 
+ 
+Basic Installation 
+================== 
+ 
+   Briefly, the shell command './configure && make && make install' 
+should configure, build, and install this package.  The following 
+more-detailed instructions are generic; see the 'README' file for 
+instructions specific to this package.  Some packages provide this 
+'INSTALL' file but do not implement all of the features documented 
+below.  The lack of an optional feature in a given package is not 
+necessarily a bug.  More recommendations for GNU packages can be found 
+in *note Makefile Conventions: (standards)Makefile Conventions. 
+ 
+   The 'configure' shell script attempts to guess correct values for 
+various system-dependent variables used during compilation.  It uses 
+those values to create a 'Makefile' in each directory of the package. 
+It may also create one or more '.h' files containing system-dependent 
+definitions.  Finally, it creates a shell script 'config.status' that 
+you can run in the future to recreate the current configuration, and a 
+file 'config.log' containing compiler output (useful mainly for 
+debugging 'configure'). 
+ 
+   It can also use an optional file (typically called 'config.cache' and 
+enabled with '--cache-file=config.cache' or simply '-C') that saves the 
+results of its tests to speed up reconfiguring.  Caching is disabled by 
+default to prevent problems with accidental use of stale cache files. 
+ 
+   If you need to do unusual things to compile the package, please try 
+to figure out how 'configure' could check whether to do them, and mail 
+diffs or instructions to the address given in the 'README' so they can 
+be considered for the next release.  If you are using the cache, and at 
+some point 'config.cache' contains results you don't want to keep, you 
+may remove or edit it. 
+ 
+   The file 'configure.ac' (or 'configure.in') is used to create 
+'configure' by a program called 'autoconf'.  You need 'configure.ac' if 
+you want to change it or regenerate 'configure' using a newer version of 
+'autoconf'. 
+ 
+   The simplest way to compile this package is: 
+ 
+  1. 'cd' to the directory containing the package's source code and type 
+     './configure' to configure the package for your system. 
+ 
+     Running 'configure' might take a while.  While running, it prints 
+     some messages telling which features it is checking for. 
+ 
+  2. Type 'make' to compile the package. 
+ 
+  3. Optionally, type 'make check' to run any self-tests that come with 
+     the package, generally using the just-built uninstalled binaries. 
+ 
+  4. Type 'make install' to install the programs and any data files and 
+     documentation.  When installing into a prefix owned by root, it is 
+     recommended that the package be configured and built as a regular 
+     user, and only the 'make install' phase executed with root 
+     privileges. 
+ 
+  5. Optionally, type 'make installcheck' to repeat any self-tests, but 
+     this time using the binaries in their final installed location. 
+     This target does not install anything.  Running this target as a 
+     regular user, particularly if the prior 'make install' required 
+     root privileges, verifies that the installation completed 
+     correctly. 
+ 
+  6. You can remove the program binaries and object files from the 
+     source code directory by typing 'make clean'.  To also remove the 
+     files that 'configure' created (so you can compile the package for 
+     a different kind of computer), type 'make distclean'.  There is 
+     also a 'make maintainer-clean' target, but that is intended mainly 
+     for the package's developers.  If you use it, you may have to get 
+     all sorts of other programs in order to regenerate files that came 
+     with the distribution. 
+ 
+  7. Often, you can also type 'make uninstall' to remove the installed 
+     files again.  In practice, not all packages have tested that 
+     uninstallation works correctly, even though it is required by the 
+     GNU Coding Standards. 
+ 
+  8. Some packages, particularly those that use Automake, provide 'make 
+     distcheck', which can by used by developers to test that all other 
+     targets like 'make install' and 'make uninstall' work correctly. 
+     This target is generally not run by end users. 
+ 
+Compilers and Options 
+===================== 
+ 
+   Some systems require unusual options for compilation or linking that 
+the 'configure' script does not know about.  Run './configure --help' 
+for details on some of the pertinent environment variables. 
+ 
+   You can give 'configure' initial values for configuration parameters 
+by setting variables in the command line or in the environment.  Here is 
+an example: 
+ 
+     ./configure CC=c99 CFLAGS=-g LIBS=-lposix 
+ 
+   *Note Defining Variables::, for more details. 
+ 
+Compiling For Multiple Architectures 
+==================================== 
+ 
+   You can compile the package for more than one kind of computer at the 
+same time, by placing the object files for each architecture in their 
+own directory.  To do this, you can use GNU 'make'.  'cd' to the 
+directory where you want the object files and executables to go and run 
+the 'configure' script.  'configure' automatically checks for the source 
+code in the directory that 'configure' is in and in '..'.  This is known 
+as a "VPATH" build. 
+ 
+   With a non-GNU 'make', it is safer to compile the package for one 
+architecture at a time in the source code directory.  After you have 
+installed the package for one architecture, use 'make distclean' before 
+reconfiguring for another architecture. 
+ 
+   On MacOS X 10.5 and later systems, you can create libraries and 
+executables that work on multiple system types--known as "fat" or 
+"universal" binaries--by specifying multiple '-arch' options to the 
+compiler but only a single '-arch' option to the preprocessor.  Like 
+this: 
+ 
+     ./configure CC="gcc -arch i386 -arch x86_64 -arch ppc -arch ppc64" \ 
+                 CXX="g++ -arch i386 -arch x86_64 -arch ppc -arch ppc64" \ 
+                 CPP="gcc -E" CXXCPP="g++ -E" 
+ 
+   This is not guaranteed to produce working output in all cases, you 
+may have to build one architecture at a time and combine the results 
+using the 'lipo' tool if you have problems. 
+ 
+Installation Names 
+================== 
+ 
+   By default, 'make install' installs the package's commands under 
+'/usr/local/bin', include files under '/usr/local/include', etc.  You 
+can specify an installation prefix other than '/usr/local' by giving 
+'configure' the option '--prefix=PREFIX', where PREFIX must be an 
+absolute file name. 
+ 
+   You can specify separate installation prefixes for 
+architecture-specific files and architecture-independent files.  If you 
+pass the option '--exec-prefix=PREFIX' to 'configure', the package uses 
+PREFIX as the prefix for installing programs and libraries. 
+Documentation and other data files still use the regular prefix. 
+ 
+   In addition, if you use an unusual directory layout you can give 
+options like '--bindir=DIR' to specify different values for particular 
+kinds of files.  Run 'configure --help' for a list of the directories 
+you can set and what kinds of files go in them.  In general, the default 
+for these options is expressed in terms of '${prefix}', so that 
+specifying just '--prefix' will affect all of the other directory 
+specifications that were not explicitly provided. 
+ 
+   The most portable way to affect installation locations is to pass the 
+correct locations to 'configure'; however, many packages provide one or 
+both of the following shortcuts of passing variable assignments to the 
+'make install' command line to change installation locations without 
+having to reconfigure or recompile. 
+ 
+   The first method involves providing an override variable for each 
+affected directory.  For example, 'make install 
+prefix=/alternate/directory' will choose an alternate location for all 
+directory configuration variables that were expressed in terms of 
+'${prefix}'.  Any directories that were specified during 'configure', 
+but not in terms of '${prefix}', must each be overridden at install time 
+for the entire installation to be relocated.  The approach of makefile 
+variable overrides for each directory variable is required by the GNU 
+Coding Standards, and ideally causes no recompilation.  However, some 
+platforms have known limitations with the semantics of shared libraries 
+that end up requiring recompilation when using this method, particularly 
+noticeable in packages that use GNU Libtool. 
+ 
+   The second method involves providing the 'DESTDIR' variable.  For 
+example, 'make install DESTDIR=/alternate/directory' will prepend 
+'/alternate/directory' before all installation names.  The approach of 
+'DESTDIR' overrides is not required by the GNU Coding Standards, and 
+does not work on platforms that have drive letters.  On the other hand, 
+it does better at avoiding recompilation issues, and works well even 
+when some directory options were not specified in terms of '${prefix}' 
+at 'configure' time. 
+ 
+Optional Features 
+================= 
+ 
+   If the package supports it, you can cause programs to be installed 
+with an extra prefix or suffix on their names by giving 'configure' the 
+option '--program-prefix=PREFIX' or '--program-suffix=SUFFIX'. 
+ 
+   Some packages pay attention to '--enable-FEATURE' options to 
+'configure', where FEATURE indicates an optional part of the package. 
+They may also pay attention to '--with-PACKAGE' options, where PACKAGE 
+is something like 'gnu-as' or 'x' (for the X Window System).  The 
+'README' should mention any '--enable-' and '--with-' options that the 
+package recognizes. 
+ 
+   For packages that use the X Window System, 'configure' can usually 
+find the X include and library files automatically, but if it doesn't, 
+you can use the 'configure' options '--x-includes=DIR' and 
+'--x-libraries=DIR' to specify their locations. 
+ 
+   Some packages offer the ability to configure how verbose the 
+execution of 'make' will be.  For these packages, running './configure 
+--enable-silent-rules' sets the default to minimal output, which can be 
+overridden with 'make V=1'; while running './configure 
+--disable-silent-rules' sets the default to verbose, which can be 
+overridden with 'make V=0'. 
+ 
+Particular systems 
+================== 
+ 
+   On HP-UX, the default C compiler is not ANSI C compatible.  If GNU CC 
+is not installed, it is recommended to use the following options in 
+order to use an ANSI C compiler: 
+ 
+     ./configure CC="cc -Ae -D_XOPEN_SOURCE=500" 
+ 
+and if that doesn't work, install pre-built binaries of GCC for HP-UX. 
+ 
+   HP-UX 'make' updates targets which have the same time stamps as their 
+prerequisites, which makes it generally unusable when shipped generated 
+files such as 'configure' are involved.  Use GNU 'make' instead. 
+ 
+   On OSF/1 a.k.a. Tru64, some versions of the default C compiler cannot 
+parse its '<wchar.h>' header file.  The option '-nodtk' can be used as a 
+workaround.  If GNU CC is not installed, it is therefore recommended to 
+try 
+ 
+     ./configure CC="cc" 
+ 
+and if that doesn't work, try 
+ 
+     ./configure CC="cc -nodtk" 
+ 
+   On Solaris, don't put '/usr/ucb' early in your 'PATH'.  This 
+directory contains several dysfunctional programs; working variants of 
+these programs are available in '/usr/bin'.  So, if you need '/usr/ucb' 
+in your 'PATH', put it _after_ '/usr/bin'. 
+ 
+   On Haiku, software installed for all users goes in '/boot/common', 
+not '/usr/local'.  It is recommended to use the following options: 
+ 
+     ./configure --prefix=/boot/common 
+ 
+Specifying the System Type 
+========================== 
+ 
+   There may be some features 'configure' cannot figure out 
+automatically, but needs to determine by the type of machine the package 
+will run on.  Usually, assuming the package is built to be run on the 
+_same_ architectures, 'configure' can figure that out, but if it prints 
+a message saying it cannot guess the machine type, give it the 
+'--build=TYPE' option.  TYPE can either be a short name for the system 
+type, such as 'sun4', or a canonical name which has the form: 
+ 
+     CPU-COMPANY-SYSTEM 
+ 
+where SYSTEM can have one of these forms: 
+ 
+     OS 
+     KERNEL-OS 
+ 
+   See the file 'config.sub' for the possible values of each field.  If 
+'config.sub' isn't included in this package, then this package doesn't 
+need to know the machine type. 
+ 
+   If you are _building_ compiler tools for cross-compiling, you should 
+use the option '--target=TYPE' to select the type of system they will 
+produce code for. 
+ 
+   If you want to _use_ a cross compiler, that generates code for a 
+platform different from the build platform, you should specify the 
+"host" platform (i.e., that on which the generated programs will 
+eventually be run) with '--host=TYPE'. 
+ 
+Sharing Defaults 
+================ 
+ 
+   If you want to set default values for 'configure' scripts to share, 
+you can create a site shell script called 'config.site' that gives 
+default values for variables like 'CC', 'cache_file', and 'prefix'. 
+'configure' looks for 'PREFIX/share/config.site' if it exists, then 
+'PREFIX/etc/config.site' if it exists.  Or, you can set the 
+'CONFIG_SITE' environment variable to the location of the site script. 
+A warning: not all 'configure' scripts look for a site script. 
+ 
+Defining Variables 
+================== 
+ 
+   Variables not defined in a site shell script can be set in the 
+environment passed to 'configure'.  However, some packages may run 
+configure again during the build, and the customized values of these 
+variables may be lost.  In order to avoid this problem, you should set 
+them in the 'configure' command line, using 'VAR=value'.  For example: 
+ 
+     ./configure CC=/usr/local2/bin/gcc 
+ 
+causes the specified 'gcc' to be used as the C compiler (unless it is 
+overridden in the site shell script). 
+ 
+Unfortunately, this technique does not work for 'CONFIG_SHELL' due to an 
+Autoconf limitation.  Until the limitation is lifted, you can use this 
+workaround: 
+ 
+     CONFIG_SHELL=/bin/bash ./configure CONFIG_SHELL=/bin/bash 
+ 
+'configure' Invocation 
+====================== 
+ 
+   'configure' recognizes the following options to control how it 
+operates. 
+ 
+'--help' 
+'-h' 
+     Print a summary of all of the options to 'configure', and exit. 
+ 
+'--help=short' 
+'--help=recursive' 
+     Print a summary of the options unique to this package's 
+     'configure', and exit.  The 'short' variant lists options used only 
+     in the top level, while the 'recursive' variant lists options also 
+     present in any nested packages. 
+ 
+'--version' 
+'-V' 
+     Print the version of Autoconf used to generate the 'configure' 
+     script, and exit. 
+ 
+'--cache-file=FILE' 
+     Enable the cache: use and save the results of the tests in FILE, 
+     traditionally 'config.cache'.  FILE defaults to '/dev/null' to 
+     disable caching. 
+ 
+'--config-cache' 
+'-C' 
+     Alias for '--cache-file=config.cache'. 
+ 
+'--quiet' 
+'--silent' 
+'-q' 
+     Do not print messages saying which checks are being made.  To 
+     suppress all normal output, redirect it to '/dev/null' (any error 
+     messages will still be shown). 
+ 
+'--srcdir=DIR' 
+     Look for the package's source code in directory DIR.  Usually 
+     'configure' can determine that directory automatically. 
+ 
+'--prefix=DIR' 
+     Use DIR as the installation prefix.  See the 'Installation Names' 
+     section above for more details, including other options available 
+     for fine-tuning the installation locations. 
+ 
+'--no-create' 
+'-n' 
+     Run the configure checks, but stop before creating any output 
+     files. 
+ 
+'configure' also accepts some other, not widely useful, options.  Run 
+'configure --help' for more details. 

Some files were not shown because too many files changed in this diff