# netdata python.d.plugin configuration for pandas
#
# This file is in YAML format. Generally the format is:
#
# name: value
#
# There are 2 sections:
#  - global variables
#  - one or more JOBS
#
# JOBS allow you to collect values from multiple sources.
# Each source will have its own set of charts.
#
# JOB parameters have to be indented (using spaces only, example below).

# ----------------------------------------------------------------------
# Global Variables
# These variables set the defaults for all JOBs, however each JOB
# may define its own, overriding the defaults.

# update_every sets the default data collection frequency.
# If unset, the python.d.plugin default is used.
update_every: 5

# priority controls the order of charts at the netdata dashboard.
# Lower numbers move the charts towards the top of the page.
# If unset, the default for python.d.plugin is used.
# priority: 60000

# penalty indicates whether to apply penalty to update_every in case of failures.
# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
# penalty: yes

# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
# Attempts to start the job are made once every autodetection_retry.
# This feature is disabled by default.
# autodetection_retry: 0

# ----------------------------------------------------------------------
# JOBS (data collection sources)
#
# The default JOBS share the same *name*. JOBS with the same name
# are mutually exclusive. Only one of them will be allowed running at
# any time. This allows autodetection to try several alternatives and
# pick the one that works.
#
# Any number of jobs is supported.
#
# All python.d.plugin JOBS (for all its modules) support a set of
# predefined parameters. These are:
#
# job_name:
#     name: myname            # the JOB's name as it will appear on the
#                             # dashboard (by default is the job_name)
#                             # JOBs sharing a name are mutually exclusive
#     update_every: 1         # the JOB's data collection frequency
#     priority: 60000         # the JOB's order on the dashboard
#     penalty: yes            # the JOB's penalty
#     autodetection_retry: 0  # the JOB's re-check interval in seconds
#
# Additionally to the above, this module also supports the following:
#
#     chart_configs: [<dictionary>]  # an array of chart config dictionaries.
#
# ----------------------------------------------------------------------
# AUTO-DETECTION JOBS
# Some example configurations below. To enable this collector, uncomment an example and restart netdata.

# example pulling some hourly temperature data, a chart for today's forecast (mean,min,max) and another chart for current.
# temperature:
#     name: "temperature"
#     update_every: 5
#     chart_configs:
#       - name: "temperature_forecast_by_city"
#         title: "Temperature By City - Today Forecast"
#         family: "temperature.today"
#         context: "pandas.temperature"
#         type: "line"
#         units: "Celsius"
#         df_steps: >
#           pd.DataFrame.from_dict(
#             {city: requests.get(f'https://api.open-meteo.com/v1/forecast?latitude={lat}&longitude={lng}&hourly=temperature_2m').json()['hourly']['temperature_2m']
#              for (city,lat,lng)
#              in [
#                  ('dublin', 53.3441, -6.2675),
#                  ('athens', 37.9792, 23.7166),
#                  ('london', 51.5002, -0.1262),
#                  ('berlin', 52.5235, 13.4115),
#                  ('paris', 48.8567, 2.3510),
#                  ('madrid', 40.4167, -3.7033),
#                  ('new_york', 40.71, -74.01),
#                  ('los_angeles', 34.05, -118.24),
#                  ]
#              }
#             );
#           df.describe();                                            # get aggregate stats for each city;
#           df.transpose()[['mean', 'max', 'min']].reset_index();     # just take mean, min, max;
#           df.rename(columns={'index':'city'});                      # some column renaming;
#           df.pivot(columns='city').mean().to_frame().reset_index(); # force to be one row per city;
#           df.rename(columns={0:'degrees'});                         # some column renaming;
#           pd.concat([df, df['city']+'_'+df['level_0']], axis=1);    # add new column combining city and summary measurement label;
#           df.rename(columns={0:'measurement'});                     # some column renaming;
#           df[['measurement', 'degrees']].set_index('measurement');  # just take two columns we want;
#           df.sort_index();                                          # sort by city name;
#           df.transpose();                                           # transpose so it's just one wide row;
#       - name: "temperature_current_by_city"
#         title: "Temperature By City - Current"
#         family: "temperature.current"
#         context: "pandas.temperature"
#         type: "line"
#         units: "Celsius"
#         df_steps: >
#           pd.DataFrame.from_dict(
#             {city: requests.get(f'https://api.open-meteo.com/v1/forecast?latitude={lat}&longitude={lng}&current_weather=true').json()['current_weather']
#              for (city,lat,lng)
#              in [
#                  ('dublin', 53.3441, -6.2675),
#                  ('athens', 37.9792, 23.7166),
#                  ('london', 51.5002, -0.1262),
#                  ('berlin', 52.5235, 13.4115),
#                  ('paris', 48.8567, 2.3510),
#                  ('madrid', 40.4167, -3.7033),
#                  ('new_york', 40.71, -74.01),
#                  ('los_angeles', 34.05, -118.24),
#                  ]
#              }
#             );
#           df.transpose();
#           df[['temperature']];
#           df.transpose();

# example showing a read_csv from a url and some light pandas data wrangling.
# pull data in csv format from london demo server and then ratio of user cpus over system cpu averaged over last 60 seconds.
# example_csv:
#     name: "example_csv"
#     update_every: 2
#     chart_configs:
#       - name: "london_system_cpu"
#         title: "London System CPU - Ratios"
#         family: "london_system_cpu"
#         context: "pandas"
#         type: "line"
#         units: "n"
#         df_steps: >
#           pd.read_csv('https://london.my-netdata.io/api/v1/data?chart=system.cpu&format=csv&after=-60', storage_options={'User-Agent': 'netdata'});
#           df.drop('time', axis=1);
#           df.mean().to_frame().transpose();
#           df.apply(lambda row: (row.user / row.system), axis = 1).to_frame();
#           df.rename(columns={0:'average_user_system_ratio'});
#           df*100;

# example showing a read_json from a url and some light pandas data wrangling.
# pull data in json format (using requests.get() if json data is too complex for pd.read_json()) from london demo server and work out 'total_bandwidth'.
# example_json:
#     name: "example_json"
#     update_every: 2
#     chart_configs:
#       - name: "london_system_net"
#         title: "London System Net - Total Bandwidth"
#         family: "london_system_net"
#         context: "pandas"
#         type: "area"
#         units: "kilobits/s"
#         df_steps: >
#           pd.DataFrame(requests.get('https://london.my-netdata.io/api/v1/data?chart=system.net&format=json&after=-1').json()['data'], columns=requests.get('https://london.my-netdata.io/api/v1/data?chart=system.net&format=json&after=-1').json()['labels']);
#           df.drop('time', axis=1);
#           abs(df);
#           df.sum(axis=1).to_frame();
#           df.rename(columns={0:'total_bandwidth'});

# example showing a read_xml from a url and some light pandas data wrangling.
# pull weather forecast data in xml format, use xpath to pull out temperature forecast.
# example_xml:
#     name: "example_xml"
#     update_every: 2
#     line_sep: "|"
#     chart_configs:
#       - name: "temperature_forecast"
#         title: "Temperature Forecast"
#         family: "temp"
#         context: "pandas.temp"
#         type: "line"
#         units: "celsius"
#         df_steps: >
#           pd.read_xml('http://metwdb-openaccess.ichec.ie/metno-wdb2ts/locationforecast?lat=54.7210798611;long=-8.7237392806', xpath='./product/time[1]/location/temperature', parser='etree')|
#           df.rename(columns={'value': 'dublin'})|
#           df[['dublin']]|

# example showing a read_sql from a postgres database using sqlalchemy.
# note: example assumes a running postgres db on localhost with a netdata user and password netdata.
# sql:
#     name: "sql"
#     update_every: 5
#     chart_configs:
#       - name: "sql"
#         title: "SQL Example"
#         family: "sql.example"
#         context: "example"
#         type: "line"
#         units: "percent"
#         df_steps: >
#           pd.read_sql_query(
#             sql='\
#               select \
#                 random()*100 as metric_1, \
#                 random()*100 as metric_2 \
#             ',
#             con=create_engine('postgresql://localhost/postgres?user=netdata&password=netdata')
#             );