agg_array.h
  1. //----------------------------------------------------------------------------
  2. // Anti-Grain Geometry - Version 2.4
  3. // Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
  4. //
  5. // Permission to copy, use, modify, sell and distribute this software
  6. // is granted provided this copyright notice appears in all copies.
  7. // This software is provided "as is" without express or implied
  8. // warranty, and with no claim as to its suitability for any purpose.
  9. //
  10. //----------------------------------------------------------------------------
  11. // Contact: mcseem@antigrain.com
  12. // mcseemagg@yahoo.com
  13. // http://www.antigrain.com
  14. //----------------------------------------------------------------------------
  15. #ifndef AGG_ARRAY_INCLUDED
  16. #define AGG_ARRAY_INCLUDED
  17. #include <stddef.h>
  18. #include <string.h>
  19. #include "agg_basics.h"
  20. namespace agg
  21. {
  22. //-------------------------------------------------------pod_array_adaptor
  23. template<class T> class pod_array_adaptor
  24. {
  25. public:
  26. typedef T value_type;
  27. pod_array_adaptor(T* array, unsigned size) :
  28. m_array(array), m_size(size) {}
  29. unsigned size() const { return m_size; }
  30. const T& operator [] (unsigned i) const { return m_array[i]; }
  31. T& operator [] (unsigned i) { return m_array[i]; }
  32. const T& at(unsigned i) const { return m_array[i]; }
  33. T& at(unsigned i) { return m_array[i]; }
  34. T value_at(unsigned i) const { return m_array[i]; }
  35. private:
  36. T* m_array;
  37. unsigned m_size;
  38. };
  39. //---------------------------------------------------------pod_auto_array
  40. template<class T, unsigned Size> class pod_auto_array
  41. {
  42. public:
  43. typedef T value_type;
  44. typedef pod_auto_array<T, Size> self_type;
  45. pod_auto_array() {}
  46. explicit pod_auto_array(const T* c)
  47. {
  48. memcpy(m_array, c, sizeof(T) * Size);
  49. }
  50. const self_type& operator = (const T* c)
  51. {
  52. memcpy(m_array, c, sizeof(T) * Size);
  53. return *this;
  54. }
  55. static unsigned size() { return Size; }
  56. const T& operator [] (unsigned i) const { return m_array[i]; }
  57. T& operator [] (unsigned i) { return m_array[i]; }
  58. const T& at(unsigned i) const { return m_array[i]; }
  59. T& at(unsigned i) { return m_array[i]; }
  60. T value_at(unsigned i) const { return m_array[i]; }
  61. private:
  62. T m_array[Size];
  63. };
  64. //--------------------------------------------------------pod_auto_vector
  65. template<class T, unsigned Size> class pod_auto_vector
  66. {
  67. public:
  68. typedef T value_type;
  69. typedef pod_auto_vector<T, Size> self_type;
  70. pod_auto_vector() : m_size(0) {}
  71. void remove_all() { m_size = 0; }
  72. void clear() { m_size = 0; }
  73. void add(const T& v) { m_array[m_size++] = v; }
  74. void push_back(const T& v) { m_array[m_size++] = v; }
  75. void inc_size(unsigned size) { m_size += size; }
  76. unsigned size() const { return m_size; }
  77. const T& operator [] (unsigned i) const { return m_array[i]; }
  78. T& operator [] (unsigned i) { return m_array[i]; }
  79. const T& at(unsigned i) const { return m_array[i]; }
  80. T& at(unsigned i) { return m_array[i]; }
  81. T value_at(unsigned i) const { return m_array[i]; }
  82. private:
  83. T m_array[Size];
  84. unsigned m_size;
  85. };
  86. //---------------------------------------------------------------pod_array
  87. template<class T> class pod_array
  88. {
  89. public:
  90. typedef T value_type;
  91. typedef pod_array<T> self_type;
  92. ~pod_array() { pod_allocator<T>::deallocate(m_array, m_size); }
  93. pod_array() : m_array(0), m_size(0) {}
  94. pod_array(unsigned size) :
  95. m_array(pod_allocator<T>::allocate(size)),
  96. m_size(size)
  97. {}
  98. pod_array(const self_type& v) :
  99. m_array(pod_allocator<T>::allocate(v.m_size)),
  100. m_size(v.m_size)
  101. {
  102. memcpy(m_array, v.m_array, sizeof(T) * m_size);
  103. }
  104. void resize(unsigned size)
  105. {
  106. if(size != m_size)
  107. {
  108. pod_allocator<T>::deallocate(m_array, m_size);
  109. m_array = pod_allocator<T>::allocate(m_size = size);
  110. }
  111. }
  112. const self_type& operator = (const self_type& v)
  113. {
  114. resize(v.size());
  115. memcpy(m_array, v.m_array, sizeof(T) * m_size);
  116. return *this;
  117. }
  118. unsigned size() const { return m_size; }
  119. const T& operator [] (unsigned i) const { return m_array[i]; }
  120. T& operator [] (unsigned i) { return m_array[i]; }
  121. const T& at(unsigned i) const { return m_array[i]; }
  122. T& at(unsigned i) { return m_array[i]; }
  123. T value_at(unsigned i) const { return m_array[i]; }
  124. const T* data() const { return m_array; }
  125. T* data() { return m_array; }
  126. private:
  127. T* m_array;
  128. unsigned m_size;
  129. };
  130. //--------------------------------------------------------------pod_vector
  131. // A simple class template to store Plain Old Data, a vector
// of a fixed size. The data is continuous in memory
  133. //------------------------------------------------------------------------
// Growable vector of POD elements with contiguous storage.
// NOTE: add()/push_back()/insert_at() do NOT grow the storage —
// the caller must reserve room first via capacity()/allocate().
template<class T> class pod_vector
{
public:
    typedef T value_type;

    ~pod_vector() { pod_allocator<T>::deallocate(m_array, m_capacity); }
    pod_vector() : m_size(0), m_capacity(0), m_array(0) {}
    pod_vector(unsigned cap, unsigned extra_tail=0);

    // Copying
    pod_vector(const pod_vector<T>&);
    const pod_vector<T>& operator = (const pod_vector<T>&);

    // Set new capacity. All data is lost, size is set to zero.
    void capacity(unsigned cap, unsigned extra_tail=0);
    unsigned capacity() const { return m_capacity; }

    // Allocate n elements. All data is lost,
    // but elements can be accessed in range 0...size-1.
    void allocate(unsigned size, unsigned extra_tail=0);

    // Resize keeping the content.
    void resize(unsigned new_size);

    // Zero-fill the first size() elements (raw bytes; T must be POD).
    void zero()
    {
        memset(m_array, 0, sizeof(T) * m_size);
    }

    // Unchecked append: requires size() < capacity().
    void add(const T& v)       { m_array[m_size++] = v; }
    void push_back(const T& v) { m_array[m_size++] = v; }
    void insert_at(unsigned pos, const T& val);

    // Make visible "size" extra elements written via data().
    void inc_size(unsigned size) { m_size += size; }

    unsigned size()      const { return m_size; }
    unsigned byte_size() const { return m_size * sizeof(T); }

    // Flat byte-image save/restore (definitions below).
    void serialize(int8u* ptr) const;
    void deserialize(const int8u* data, unsigned byte_size);

    const T& operator [] (unsigned i) const { return m_array[i]; }
    T&       operator [] (unsigned i)       { return m_array[i]; }
    const T& at(unsigned i) const           { return m_array[i]; }
    T&       at(unsigned i)                 { return m_array[i]; }
    T        value_at(unsigned i) const     { return m_array[i]; }

    const T* data() const { return m_array; }
    T*       data()       { return m_array; }

    // Logical clears/truncation; capacity is retained.
    void remove_all() { m_size = 0; }
    void clear()      { m_size = 0; }
    void cut_at(unsigned num) { if(num < m_size) m_size = num; }

private:
    unsigned m_size;       // elements in use
    unsigned m_capacity;   // elements allocated
    T*       m_array;
};
  179. //------------------------------------------------------------------------
  180. template<class T>
  181. void pod_vector<T>::capacity(unsigned cap, unsigned extra_tail)
  182. {
  183. m_size = 0;
  184. if(cap > m_capacity)
  185. {
  186. pod_allocator<T>::deallocate(m_array, m_capacity);
  187. m_capacity = cap + extra_tail;
  188. m_array = m_capacity ? pod_allocator<T>::allocate(m_capacity) : 0;
  189. }
  190. }
  191. //------------------------------------------------------------------------
// Discard the content and make exactly "size" elements addressable
// (indices 0...size-1). The element values are uninitialized.
template<class T>
void pod_vector<T>::allocate(unsigned size, unsigned extra_tail)
{
    capacity(size, extra_tail);
    m_size = size;
}
  198. //------------------------------------------------------------------------
  199. template<class T>
  200. void pod_vector<T>::resize(unsigned new_size)
  201. {
  202. if(new_size > m_size)
  203. {
  204. if(new_size > m_capacity)
  205. {
  206. T* data = pod_allocator<T>::allocate(new_size);
  207. memcpy(data, m_array, m_size * sizeof(T));
  208. pod_allocator<T>::deallocate(m_array, m_capacity);
  209. m_array = data;
  210. }
  211. }
  212. else
  213. {
  214. m_size = new_size;
  215. }
  216. }
  217. //------------------------------------------------------------------------
// Construct with room for cap + extra_tail elements and size() == 0.
template<class T> pod_vector<T>::pod_vector(unsigned cap, unsigned extra_tail) :
    m_size(0),
    m_capacity(cap + extra_tail),
    m_array(pod_allocator<T>::allocate(m_capacity)) {}
  222. //------------------------------------------------------------------------
  223. template<class T> pod_vector<T>::pod_vector(const pod_vector<T>& v) :
  224. m_size(v.m_size),
  225. m_capacity(v.m_capacity),
  226. m_array(v.m_capacity ? pod_allocator<T>::allocate(v.m_capacity) : 0)
  227. {
  228. memcpy(m_array, v.m_array, sizeof(T) * v.m_size);
  229. }
  230. //------------------------------------------------------------------------
// Deep-copy assignment. allocate() resets and sizes the destination
// (growing the buffer only when needed); the guard skips memcpy for an
// empty source, whose buffer pointer may be null.
template<class T> const pod_vector<T>&
pod_vector<T>::operator = (const pod_vector<T>&v)
{
    allocate(v.m_size);
    if(v.m_size) memcpy(m_array, v.m_array, sizeof(T) * v.m_size);
    return *this;
}
  238. //------------------------------------------------------------------------
// Dump the elements as raw bytes into "ptr"; the caller must provide
// at least byte_size() bytes.
template<class T> void pod_vector<T>::serialize(int8u* ptr) const
{
    if(m_size) memcpy(ptr, m_array, m_size * sizeof(T));
}
  243. //------------------------------------------------------------------------
// Rebuild the vector from a flat byte image. Trailing bytes that do
// not form a whole T are ignored.
template<class T>
void pod_vector<T>::deserialize(const int8u* data, unsigned byte_size)
{
    byte_size /= sizeof(T);   // bytes -> element count
    allocate(byte_size);
    if(byte_size) memcpy(m_array, data, byte_size * sizeof(T));
}
  251. //------------------------------------------------------------------------
// Insert val at "pos", shifting the tail up by one. NOTE(review): like
// add()/push_back() there is no capacity check — the caller must
// guarantee size() < capacity() before calling.
template<class T>
void pod_vector<T>::insert_at(unsigned pos, const T& val)
{
    if(pos >= m_size)
    {
        // Any position past the end appends at index m_size.
        m_array[m_size] = val;
    }
    else
    {
        // Shift [pos, m_size) one slot up, then drop val into place.
        memmove(m_array + pos + 1, m_array + pos, (m_size - pos) * sizeof(T));
        m_array[pos] = val;
    }
    ++m_size;
}
  266. //---------------------------------------------------------------pod_bvector
  267. // A simple class template to store Plain Old Data, similar to std::deque
  268. // It doesn't reallocate memory but instead, uses blocks of data of size
  269. // of (1 << S), that is, power of two. The data is NOT contiguous in memory,
  270. // so the only valid access method is operator [] or curr(), prev(), next()
  271. //
// The reallocs occur only when the pool of pointers to blocks needs
// to be extended (it happens very rarely). You can control the value
// of increment to reallocate the pointer buffer. See the second constructor.
// By default, the increment value equals (1 << S), i.e., the block size.
  276. //------------------------------------------------------------------------
// Block-based vector (see the comment above): storage is a pool of
// fixed-size blocks of (1 << S) elements, so elements are NOT
// contiguous and growth never moves existing data.
template<class T, unsigned S=6> class pod_bvector
{
public:
    enum block_scale_e
    {
        block_shift = S,                  // log2 of elements per block
        block_size = 1 << block_shift,    // elements per block
        block_mask = block_size - 1       // mask of the in-block index
    };

    typedef T value_type;

    ~pod_bvector();
    pod_bvector();

    // block_ptr_inc: growth step (in entries) of the block-pointer pool.
    pod_bvector(unsigned block_ptr_inc);

    // Copying
    pod_bvector(const pod_bvector<T, S>& v);
    const pod_bvector<T, S>& operator = (const pod_bvector<T, S>& v);

    // Logical reset: blocks remain allocated for reuse.
    void remove_all() { m_size = 0; }
    void clear()      { m_size = 0; }

    // Physical reset: releases every block and the pointer pool.
    void free_all() { free_tail(0); }
    void free_tail(unsigned size);

    void add(const T& val);
    void push_back(const T& val) { add(val); }
    void modify_last(const T& val);
    void remove_last();

    int allocate_continuous_block(unsigned num_elements);

    // Append num_elem elements from a raw buffer, one by one.
    void add_array(const T* ptr, unsigned num_elem)
    {
        while(num_elem--)
        {
            add(*ptr++);
        }
    }

    // Append from any accessor exposing size(), operator* and prefix ++.
    template<class DataAccessor> void add_data(DataAccessor& data)
    {
        while(data.size())
        {
            add(*data);
            ++data;
        }
    }

    // Truncate to "size" elements (never grows).
    void cut_at(unsigned size)
    {
        if(size < m_size) m_size = size;
    }

    unsigned size() const { return m_size; }

    // i >> block_shift selects the block; i & block_mask the slot in it.
    const T& operator [] (unsigned i) const
    {
        return m_blocks[i >> block_shift][i & block_mask];
    }

    T& operator [] (unsigned i)
    {
        return m_blocks[i >> block_shift][i & block_mask];
    }

    const T& at(unsigned i) const
    {
        return m_blocks[i >> block_shift][i & block_mask];
    }

    T& at(unsigned i)
    {
        return m_blocks[i >> block_shift][i & block_mask];
    }

    T value_at(unsigned i) const
    {
        return m_blocks[i >> block_shift][i & block_mask];
    }

    const T& curr(unsigned idx) const
    {
        return (*this)[idx];
    }

    T& curr(unsigned idx)
    {
        return (*this)[idx];
    }

    // prev()/next() wrap around, treating the vector as a ring.
    const T& prev(unsigned idx) const
    {
        return (*this)[(idx + m_size - 1) % m_size];
    }

    T& prev(unsigned idx)
    {
        return (*this)[(idx + m_size - 1) % m_size];
    }

    const T& next(unsigned idx) const
    {
        return (*this)[(idx + 1) % m_size];
    }

    T& next(unsigned idx)
    {
        return (*this)[(idx + 1) % m_size];
    }

    // Last element; undefined when the vector is empty.
    const T& last() const
    {
        return (*this)[m_size - 1];
    }

    T& last()
    {
        return (*this)[m_size - 1];
    }

    unsigned byte_size() const;
    void serialize(int8u* ptr) const;
    void deserialize(const int8u* data, unsigned byte_size);
    void deserialize(unsigned start, const T& empty_val,
                     const int8u* data, unsigned byte_size);

    // Rebuild from a byte accessor exposing size(), operator*, prefix ++.
    template<class ByteAccessor>
    void deserialize(ByteAccessor data)
    {
        remove_all();
        unsigned elem_size = data.size() / sizeof(T);

        for(unsigned i = 0; i < elem_size; ++i)
        {
            // Copy one element byte-by-byte into its slot.
            int8u* ptr = (int8u*)data_ptr();
            for(unsigned j = 0; j < sizeof(T); ++j)
            {
                *ptr++ = *data;
                ++data;
            }
            ++m_size;
        }
    }

    // Byte-accessor variant of the "replace or add from start" overload.
    template<class ByteAccessor>
    void deserialize(unsigned start, const T& empty_val, ByteAccessor data)
    {
        // Pad with empty_val until "start" is reachable.
        while(m_size < start)
        {
            add(empty_val);
        }

        unsigned elem_size = data.size() / sizeof(T);
        for(unsigned i = 0; i < elem_size; ++i)
        {
            int8u* ptr;
            if(start + i < m_size)
            {
                // Overwrite an existing element in place.
                ptr = (int8u*)(&((*this)[start + i]));
            }
            else
            {
                // Append a new slot.
                ptr = (int8u*)data_ptr();
                ++m_size;
            }
            for(unsigned j = 0; j < sizeof(T); ++j)
            {
                *ptr++ = *data;
                ++data;
            }
        }
    }

    // Direct read access to a whole block (block_size elements).
    const T* block(unsigned nb) const { return m_blocks[nb]; }

private:
    void allocate_block(unsigned nb);
    T* data_ptr();

    unsigned m_size;            // number of elements in use
    unsigned m_num_blocks;      // data blocks currently allocated
    unsigned m_max_blocks;      // capacity of the m_blocks pointer pool
    T**      m_blocks;          // pool of pointers to data blocks
    unsigned m_block_ptr_inc;   // growth step for the pointer pool
};
  432. //------------------------------------------------------------------------
// Free every data block (last to first), then the pointer table itself.
template<class T, unsigned S> pod_bvector<T, S>::~pod_bvector()
{
    if(m_num_blocks)
    {
        T** blk = m_blocks + m_num_blocks - 1;
        while(m_num_blocks--)
        {
            pod_allocator<T>::deallocate(*blk, block_size);
            --blk;
        }
    }
    pod_allocator<T*>::deallocate(m_blocks, m_max_blocks);
}
  446. //------------------------------------------------------------------------
// Shrink to "size" elements and physically free the blocks that are no
// longer needed; free_tail(0) releases everything including the
// pointer pool (this is what free_all() calls).
template<class T, unsigned S>
void pod_bvector<T, S>::free_tail(unsigned size)
{
    if(size < m_size)
    {
        // Number of blocks needed to hold "size" elements, rounded up.
        unsigned nb = (size + block_mask) >> block_shift;
        while(m_num_blocks > nb)
        {
            pod_allocator<T>::deallocate(m_blocks[--m_num_blocks], block_size);
        }
        if(m_num_blocks == 0)
        {
            pod_allocator<T*>::deallocate(m_blocks, m_max_blocks);
            m_blocks = 0;
            m_max_blocks = 0;
        }
        m_size = size;
    }
}
  466. //------------------------------------------------------------------------
// Default construction: empty, with the pointer pool growing by
// block_size entries at a time.
template<class T, unsigned S> pod_bvector<T, S>::pod_bvector() :
    m_size(0),
    m_num_blocks(0),
    m_max_blocks(0),
    m_blocks(0),
    m_block_ptr_inc(block_size)
{
}
  475. //------------------------------------------------------------------------
// Same as the default constructor but with a caller-chosen growth step
// for the block-pointer pool.
template<class T, unsigned S>
pod_bvector<T, S>::pod_bvector(unsigned block_ptr_inc) :
    m_size(0),
    m_num_blocks(0),
    m_max_blocks(0),
    m_blocks(0),
    m_block_ptr_inc(block_ptr_inc)
{
}
  485. //------------------------------------------------------------------------
// Deep copy: clones the pointer pool and every allocated block. Whole
// blocks are copied even past m_size — harmless, the data is POD.
template<class T, unsigned S>
pod_bvector<T, S>::pod_bvector(const pod_bvector<T, S>& v) :
    m_size(v.m_size),
    m_num_blocks(v.m_num_blocks),
    m_max_blocks(v.m_max_blocks),
    m_blocks(v.m_max_blocks ?
             pod_allocator<T*>::allocate(v.m_max_blocks) :
             0),
    m_block_ptr_inc(v.m_block_ptr_inc)
{
    unsigned i;
    for(i = 0; i < v.m_num_blocks; ++i)
    {
        m_blocks[i] = pod_allocator<T>::allocate(block_size);
        memcpy(m_blocks[i], v.m_blocks[i], block_size * sizeof(T));
    }
}
  503. //------------------------------------------------------------------------
// Deep-copy assignment: allocates only the blocks this instance is
// missing, then overwrites block contents. Surplus blocks (when *this
// was larger) stay allocated for reuse.
template<class T, unsigned S>
const pod_bvector<T, S>&
pod_bvector<T, S>::operator = (const pod_bvector<T, S>& v)
{
    unsigned i;
    for(i = m_num_blocks; i < v.m_num_blocks; ++i)
    {
        allocate_block(i);
    }
    for(i = 0; i < v.m_num_blocks; ++i)
    {
        memcpy(m_blocks[i], v.m_blocks[i], block_size * sizeof(T));
    }
    m_size = v.m_size;
    return *this;
}
  520. //------------------------------------------------------------------------
// Allocate data block number "nb", first growing the block-pointer
// table by m_block_ptr_inc entries if it is full.
template<class T, unsigned S>
void pod_bvector<T, S>::allocate_block(unsigned nb)
{
    if(nb >= m_max_blocks)
    {
        T** new_blocks = pod_allocator<T*>::allocate(m_max_blocks + m_block_ptr_inc);

        if(m_blocks)
        {
            // Move the existing pointers over; only the pointer table
            // is reallocated, the data blocks themselves never move.
            memcpy(new_blocks,
                   m_blocks,
                   m_num_blocks * sizeof(T*));

            pod_allocator<T*>::deallocate(m_blocks, m_max_blocks);
        }
        m_blocks = new_blocks;
        m_max_blocks += m_block_ptr_inc;
    }
    m_blocks[nb] = pod_allocator<T>::allocate(block_size);
    m_num_blocks++;
}
  540. //------------------------------------------------------------------------
// Address of the slot for element number m_size (the next element to
// be appended), allocating a fresh block when the current one is full.
template<class T, unsigned S>
inline T* pod_bvector<T, S>::data_ptr()
{
    unsigned nb = m_size >> block_shift;
    if(nb >= m_num_blocks)
    {
        allocate_block(nb);
    }
    return m_blocks[nb] + (m_size & block_mask);
}
  551. //------------------------------------------------------------------------
// Append a copy of val. Existing elements never move, so references
// into the vector stay valid across add() calls.
template<class T, unsigned S>
inline void pod_bvector<T, S>::add(const T& val)
{
    *data_ptr() = val;
    ++m_size;
}
  558. //------------------------------------------------------------------------
// Drop the last element (no-op when empty); storage is kept.
template<class T, unsigned S>
inline void pod_bvector<T, S>::remove_last()
{
    if(m_size) --m_size;
}
  564. //------------------------------------------------------------------------
// Overwrite the last element with val. On an empty vector this simply
// appends, since remove_last() is then a no-op.
template<class T, unsigned S>
void pod_bvector<T, S>::modify_last(const T& val)
{
    remove_last();
    add(val);
}
  571. //------------------------------------------------------------------------
// Reserve num_elements adjacent slots guaranteed to lie in ONE block
// and return the index of the first slot, or -1 when
// num_elements >= block_size (such a span can never fit in a block).
// A too-short tail of the current block is skipped and left unused.
template<class T, unsigned S>
int pod_bvector<T, S>::allocate_continuous_block(unsigned num_elements)
{
    if(num_elements < block_size)
    {
        data_ptr(); // Allocate initial block if necessary
        unsigned rest = block_size - (m_size & block_mask);
        unsigned index;
        if(num_elements <= rest)
        {
            // The rest of the block is good, we can use it
            //-----------------
            index = m_size;
            m_size += num_elements;
            return index;
        }

        // New block
        //---------------
        m_size += rest;     // skip the tail of the current block
        data_ptr();         // allocate the next block
        index = m_size;
        m_size += num_elements;
        return index;
    }
    return -1; // Impossible to allocate
}
  598. //------------------------------------------------------------------------
// Size of the serialized representation in bytes.
template<class T, unsigned S>
unsigned pod_bvector<T, S>::byte_size() const
{
    return m_size * sizeof(T);
}
  604. //------------------------------------------------------------------------
// Copy all elements, one by one, into a flat byte buffer of at least
// byte_size() bytes (the blocks themselves are not contiguous, so a
// single memcpy is not possible).
template<class T, unsigned S>
void pod_bvector<T, S>::serialize(int8u* ptr) const
{
    unsigned i;
    for(i = 0; i < m_size; i++)
    {
        memcpy(ptr, &(*this)[i], sizeof(T));
        ptr += sizeof(T);
    }
}
  615. //------------------------------------------------------------------------
// Rebuild from a flat byte image; trailing bytes that do not form a
// whole T are ignored.
template<class T, unsigned S>
void pod_bvector<T, S>::deserialize(const int8u* data, unsigned byte_size)
{
    remove_all();
    byte_size /= sizeof(T);   // bytes -> element count
    for(unsigned i = 0; i < byte_size; ++i)
    {
        T* ptr = data_ptr();
        memcpy(ptr, data, sizeof(T));
        ++m_size;
        data += sizeof(T);
    }
}
  629. // Replace or add a number of elements starting from "start" position
  630. //------------------------------------------------------------------------
// Pad with empty_val up to "start", then overwrite existing elements
// and append whatever extends past the current end.
template<class T, unsigned S>
void pod_bvector<T, S>::deserialize(unsigned start, const T& empty_val,
                                    const int8u* data, unsigned byte_size)
{
    while(m_size < start)
    {
        add(empty_val);
    }

    byte_size /= sizeof(T);   // bytes -> element count
    for(unsigned i = 0; i < byte_size; ++i)
    {
        if(start + i < m_size)
        {
            // Replace an existing element in place.
            memcpy(&((*this)[start + i]), data, sizeof(T));
        }
        else
        {
            // Append a new element.
            T* ptr = data_ptr();
            memcpy(ptr, data, sizeof(T));
            ++m_size;
        }
        data += sizeof(T);
    }
}
  655. //---------------------------------------------------------block_allocator
  656. // Allocator for arbitrary POD data. Most usable in different cache
  657. // systems for efficient memory allocations.
  658. // Memory is allocated with blocks of fixed size ("block_size" in
  659. // the constructor). If required size exceeds the block size the allocator
  660. // creates a new block of the required size. However, the most efficient
// use is when the average required size is much less than the block size.
  662. //------------------------------------------------------------------------
class block_allocator
{
    // One chunk of raw bytes handed out piecewise by allocate().
    struct block_type
    {
        int8u*   data;
        unsigned size;
    };

public:
    // Release every chunk and the block table; reset to the
    // freshly-constructed state. Individual allocations are never
    // freed — this (or the destructor) reclaims everything at once.
    void remove_all()
    {
        if(m_num_blocks)
        {
            // Free chunks from last to first, then the table itself.
            block_type* blk = m_blocks + m_num_blocks - 1;
            while(m_num_blocks--)
            {
                pod_allocator<int8u>::deallocate(blk->data, blk->size);
                --blk;
            }
            pod_allocator<block_type>::deallocate(m_blocks, m_max_blocks);
        }
        m_num_blocks = 0;
        m_max_blocks = 0;
        m_blocks = 0;
        m_buf_ptr = 0;
        m_rest = 0;
    }

    ~block_allocator()
    {
        remove_all();
    }

    // block_size: preferred chunk size in bytes;
    // block_ptr_inc: growth step (in entries) of the chunk table.
    block_allocator(unsigned block_size, unsigned block_ptr_inc=256-8) :
        m_block_size(block_size),
        m_block_ptr_inc(block_ptr_inc),
        m_num_blocks(0),
        m_max_blocks(0),
        m_blocks(0),
        m_buf_ptr(0),
        m_rest(0)
    {
    }

    // Carve "size" bytes (optionally aligned to "alignment") out of the
    // current chunk, opening a new chunk when the remainder is too
    // small. Returns 0 for a zero-size request.
    int8u* allocate(unsigned size, unsigned alignment=1)
    {
        if(size == 0) return 0;
        if(size <= m_rest)
        {
            int8u* ptr = m_buf_ptr;
            if(alignment > 1)
            {
                // Advance ptr to the next aligned address and charge
                // the skipped padding bytes to this allocation.
                unsigned align =
                    (alignment - unsigned((size_t)ptr) % alignment) % alignment;

                size += align;
                ptr += align;
                if(size <= m_rest)
                {
                    m_rest -= size;
                    m_buf_ptr += size;
                    return ptr;
                }
                // Alignment pushed us past the end of the chunk: open a
                // new one and retry with the original, unpadded size.
                allocate_block(size);
                return allocate(size - align, alignment);
            }
            m_rest -= size;
            m_buf_ptr += size;
            return ptr;
        }
        // No room in the current chunk; the worst-case padding a fresh
        // chunk may need is alignment - 1 bytes.
        allocate_block(size + alignment - 1);
        return allocate(size, alignment);
    }

private:
    // Open a fresh chunk of at least m_block_size bytes, growing the
    // chunk table by m_block_ptr_inc entries when it is full.
    void allocate_block(unsigned size)
    {
        if(size < m_block_size) size = m_block_size;
        if(m_num_blocks >= m_max_blocks)
        {
            block_type* new_blocks =
                pod_allocator<block_type>::allocate(m_max_blocks + m_block_ptr_inc);

            if(m_blocks)
            {
                memcpy(new_blocks,
                       m_blocks,
                       m_num_blocks * sizeof(block_type));
                pod_allocator<block_type>::deallocate(m_blocks, m_max_blocks);
            }
            m_blocks = new_blocks;
            m_max_blocks += m_block_ptr_inc;
        }

        m_blocks[m_num_blocks].size = size;
        m_blocks[m_num_blocks].data =
            m_buf_ptr =
            pod_allocator<int8u>::allocate(size);

        m_num_blocks++;
        m_rest = size;
    }

    unsigned    m_block_size;     // preferred chunk size
    unsigned    m_block_ptr_inc;  // chunk-table growth step
    unsigned    m_num_blocks;     // chunks allocated so far
    unsigned    m_max_blocks;     // capacity of the chunk table
    block_type* m_blocks;         // table of allocated chunks
    int8u*      m_buf_ptr;        // next free byte in the current chunk
    unsigned    m_rest;           // bytes left in the current chunk
};
  764. //------------------------------------------------------------------------
// Sub-arrays at or below this length are finished off with insertion
// sort inside quick_sort() instead of being partitioned further.
enum quick_sort_threshold_e
{
    quick_sort_threshold = 9
};
  769. //-----------------------------------------------------------swap_elements
  770. template<class T> inline void swap_elements(T& a, T& b)
  771. {
  772. T temp = a;
  773. a = b;
  774. b = temp;
  775. }
  776. //--------------------------------------------------------------quick_sort
// In-place quicksort over any agg-style array (size()/operator[]).
// Median-of-three pivot, explicit stack instead of recursion, and
// insertion sort for sub-arrays of quick_sort_threshold elements or
// fewer. The larger partition is always deferred onto the stack, so
// 40 (base, limit) pairs are ample for any practical input size.
template<class Array, class Less>
void quick_sort(Array& arr, Less less)
{
    if(arr.size() < 2) return;

    typename Array::value_type* e1;
    typename Array::value_type* e2;

    int  stack[80];     // pairs of (base, limit) for deferred sub-arrays
    int* top = stack;
    int  limit = arr.size();
    int  base = 0;

    for(;;)
    {
        int len = limit - base;

        int i;
        int j;
        int pivot;

        if(len > quick_sort_threshold)
        {
            // we use base + len/2 as the pivot
            pivot = base + len / 2;
            swap_elements(arr[base], arr[pivot]);

            i = base + 1;
            j = limit - 1;

            // now ensure that *i <= *base <= *j
            e1 = &(arr[j]);
            e2 = &(arr[i]);
            if(less(*e1, *e2)) swap_elements(*e1, *e2);

            e1 = &(arr[base]);
            e2 = &(arr[i]);
            if(less(*e1, *e2)) swap_elements(*e1, *e2);

            e1 = &(arr[j]);
            e2 = &(arr[base]);
            if(less(*e1, *e2)) swap_elements(*e1, *e2);

            // Partition around the pivot held in arr[base]; the
            // median-of-three guards above act as sentinels for the
            // unchecked inner scans.
            for(;;)
            {
                do i++; while( less(arr[i], arr[base]) );
                do j--; while( less(arr[base], arr[j]) );

                if( i > j )
                {
                    break;
                }

                swap_elements(arr[i], arr[j]);
            }

            swap_elements(arr[base], arr[j]);

            // now, push the largest sub-array
            if(j - base > limit - i)
            {
                top[0] = base;
                top[1] = j;
                base   = i;
            }
            else
            {
                top[0] = i;
                top[1] = limit;
                limit  = j;
            }
            top += 2;
        }
        else
        {
            // the sub-array is small, perform insertion sort
            j = base;
            i = j + 1;

            for(; i < limit; j = i, i++)
            {
                for(; less(*(e1 = &(arr[j + 1])), *(e2 = &(arr[j]))); j--)
                {
                    swap_elements(*e1, *e2);
                    if(j == base)
                    {
                        break;
                    }
                }
            }
            if(top > stack)
            {
                // Pop the next deferred sub-array.
                top  -= 2;
                base  = top[0];
                limit = top[1];
            }
            else
            {
                break;   // stack empty: fully sorted
            }
        }
    }
}
  865. //------------------------------------------------------remove_duplicates
  866. // Remove duplicates from a sorted array. It doesn't cut the
  867. // tail of the array, it just returns the number of remaining elements.
  868. //-----------------------------------------------------------------------
  869. template<class Array, class Equal>
  870. unsigned remove_duplicates(Array& arr, Equal equal)
  871. {
  872. if(arr.size() < 2) return arr.size();
  873. unsigned i, j;
  874. for(i = 1, j = 1; i < arr.size(); i++)
  875. {
  876. typename Array::value_type& e = arr[i];
  877. if(!equal(e, arr[i - 1]))
  878. {
  879. arr[j++] = e;
  880. }
  881. }
  882. return j;
  883. }
  884. //--------------------------------------------------------invert_container
  885. template<class Array> void invert_container(Array& arr)
  886. {
  887. int i = 0;
  888. int j = arr.size() - 1;
  889. while(i < j)
  890. {
  891. swap_elements(arr[i++], arr[j--]);
  892. }
  893. }
  894. //------------------------------------------------------binary_search_pos
  895. template<class Array, class Value, class Less>
  896. unsigned binary_search_pos(const Array& arr, const Value& val, Less less)
  897. {
  898. if(arr.size() == 0) return 0;
  899. unsigned beg = 0;
  900. unsigned end = arr.size() - 1;
  901. if(less(val, arr[0])) return 0;
  902. if(less(arr[end], val)) return end + 1;
  903. while(end - beg > 1)
  904. {
  905. unsigned mid = (end + beg) >> 1;
  906. if(less(val, arr[mid])) end = mid;
  907. else beg = mid;
  908. }
  909. //if(beg <= 0 && less(val, arr[0])) return 0;
  910. //if(end >= arr.size() - 1 && less(arr[end], val)) ++end;
  911. return end;
  912. }
  913. //----------------------------------------------------------range_adaptor
  914. template<class Array> class range_adaptor
  915. {
  916. public:
  917. typedef typename Array::value_type value_type;
  918. range_adaptor(Array& array, unsigned start, unsigned size) :
  919. m_array(array), m_start(start), m_size(size)
  920. {}
  921. unsigned size() const { return m_size; }
  922. const value_type& operator [] (unsigned i) const { return m_array[m_start + i]; }
  923. value_type& operator [] (unsigned i) { return m_array[m_start + i]; }
  924. const value_type& at(unsigned i) const { return m_array[m_start + i]; }
  925. value_type& at(unsigned i) { return m_array[m_start + i]; }
  926. value_type value_at(unsigned i) const { return m_array[m_start + i]; }
  927. private:
  928. Array& m_array;
  929. unsigned m_start;
  930. unsigned m_size;
  931. };
  932. //---------------------------------------------------------------int_less
// Plain function comparators, handy as the Less argument of
// quick_sort() and binary_search_pos() above.
inline bool int_less(int a, int b) { return a < b; }
//------------------------------------------------------------int_greater
inline bool int_greater(int a, int b) { return a > b; }
//----------------------------------------------------------unsigned_less
inline bool unsigned_less(unsigned a, unsigned b) { return a < b; }
//-------------------------------------------------------unsigned_greater
inline bool unsigned_greater(unsigned a, unsigned b) { return a > b; }
  940. }
  941. #endif